From efe1135d2841d219a88a7f3c97e399d741bf1137 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 9 Feb 2024 16:55:30 +0100 Subject: [PATCH 001/357] Update Reply protocol definition and codec (#717) * Update Reply protocol definition and codec * Make consolidation a flag in Query/Reply * Fix wrong Consolidation cast in codec * Apply Reply changes to routing * Fix shared-memory feature * Fix stats * Bump Zenoh Protocol Version * Add query/reply ok(put|del)/err() tests --- commons/zenoh-codec/src/zenoh/mod.rs | 4 +- commons/zenoh-codec/src/zenoh/query.rs | 70 +++++----- commons/zenoh-codec/src/zenoh/reply.rs | 141 +++----------------- commons/zenoh-codec/tests/codec.rs | 2 +- commons/zenoh-protocol/src/lib.rs | 2 +- commons/zenoh-protocol/src/zenoh/mod.rs | 10 +- commons/zenoh-protocol/src/zenoh/query.rs | 25 ++-- commons/zenoh-protocol/src/zenoh/reply.rs | 90 +++---------- io/zenoh-transport/src/common/stats.rs | 8 ++ io/zenoh-transport/src/shm.rs | 17 +-- zenoh/src/net/routing/dispatcher/pubsub.rs | 13 +- zenoh/src/net/routing/dispatcher/queries.rs | 54 +++++--- zenoh/src/queryable.rs | 81 ++++++----- zenoh/src/session.rs | 74 +++++++--- zenoh/tests/session.rs | 71 +++++++++- 15 files changed, 329 insertions(+), 333 deletions(-) diff --git a/commons/zenoh-codec/src/zenoh/mod.rs b/commons/zenoh-codec/src/zenoh/mod.rs index 2e3ea48be7..d59add9d63 100644 --- a/commons/zenoh-codec/src/zenoh/mod.rs +++ b/commons/zenoh-codec/src/zenoh/mod.rs @@ -121,8 +121,8 @@ where fn write(self, writer: &mut W, x: &ResponseBody) -> Self::Output { match x { ResponseBody::Reply(b) => self.write(&mut *writer, b), - ResponseBody::Err(b) => self.write(&mut *writer, b), ResponseBody::Ack(b) => self.write(&mut *writer, b), + ResponseBody::Err(b) => self.write(&mut *writer, b), ResponseBody::Put(b) => self.write(&mut *writer, b), } } @@ -140,8 +140,8 @@ where let codec = Zenoh080Header::new(header); let body = match imsg::mid(codec.header) { id::REPLY => ResponseBody::Reply(codec.read(&mut *reader)?), - id::ERR => ResponseBody::Err(codec.read(&mut *reader)?), id::ACK => ResponseBody::Ack(codec.read(&mut *reader)?), + id::ERR => ResponseBody::Err(codec.read(&mut *reader)?), id::PUT => ResponseBody::Put(codec.read(&mut *reader)?), _ => return Err(DidntRead), }; diff --git a/commons/zenoh-codec/src/zenoh/query.rs b/commons/zenoh-codec/src/zenoh/query.rs index 09b01b2266..cb0506e474 100644 --- a/commons/zenoh-codec/src/zenoh/query.rs +++ b/commons/zenoh-codec/src/zenoh/query.rs @@ -22,48 +22,46 @@ use zenoh_protocol::{ common::{iext, imsg}, zenoh::{ id, - query::{ext, flag, Query}, + query::{ext, flag, Consolidation, Query}, }, }; -// Extension Consolidation -impl WCodec<(ext::ConsolidationType, bool), &mut W> for Zenoh080 +// Consolidation +impl WCodec for Zenoh080 where W: Writer, { type Output = Result<(), DidntWrite>; - fn write(self, writer: &mut W, x: (ext::ConsolidationType, bool)) -> Self::Output { - let (x, more) = x; + fn write(self, writer: &mut W, x: Consolidation) -> Self::Output { let v: u64 = match x { - ext::ConsolidationType::Auto => 0, - ext::ConsolidationType::None => 1, - ext::ConsolidationType::Monotonic => 2, - ext::ConsolidationType::Latest => 3, - ext::ConsolidationType::Unique => 4, + Consolidation::Auto => 0, + Consolidation::None => 1, + Consolidation::Monotonic => 2, + Consolidation::Latest => 3, + Consolidation::Unique => 4, }; - let v = ext::Consolidation::new(v); - self.write(&mut *writer, (&v, more)) + self.write(&mut *writer, v) } } -impl RCodec<(ext::ConsolidationType, bool), &mut 
R> for Zenoh080Header +impl RCodec for Zenoh080 where R: Reader, { type Error = DidntRead; - fn read(self, reader: &mut R) -> Result<(ext::ConsolidationType, bool), Self::Error> { - let (ext, more): (ext::Consolidation, bool) = self.read(&mut *reader)?; - let c = match ext.value { - 0 => ext::ConsolidationType::Auto, - 1 => ext::ConsolidationType::None, - 2 => ext::ConsolidationType::Monotonic, - 3 => ext::ConsolidationType::Latest, - 4 => ext::ConsolidationType::Unique, - _ => return Err(DidntRead), + fn read(self, reader: &mut R) -> Result { + let v: u64 = self.read(&mut *reader)?; + let c = match v { + 0 => Consolidation::Auto, + 1 => Consolidation::None, + 2 => Consolidation::Monotonic, + 3 => Consolidation::Latest, + 4 => Consolidation::Unique, + _ => Consolidation::Auto, // Fallback on Auto if Consolidation is unknown }; - Ok((c, more)) + Ok(c) } } @@ -75,9 +73,9 @@ where fn write(self, writer: &mut W, x: &Query) -> Self::Output { let Query { + consolidation, parameters, ext_sinfo, - ext_consolidation, ext_body, ext_attachment, ext_unknown, @@ -85,11 +83,13 @@ where // Header let mut header = id::QUERY; + if consolidation != &Consolidation::default() { + header |= flag::C; + } if !parameters.is_empty() { header |= flag::P; } let mut n_exts = (ext_sinfo.is_some() as u8) - + ((ext_consolidation != &ext::ConsolidationType::default()) as u8) + (ext_body.is_some() as u8) + (ext_attachment.is_some() as u8) + (ext_unknown.len() as u8); @@ -99,6 +99,9 @@ where self.write(&mut *writer, header)?; // Body + if consolidation != &Consolidation::default() { + self.write(&mut *writer, *consolidation)?; + } if !parameters.is_empty() { self.write(&mut *writer, parameters)?; } @@ -108,10 +111,6 @@ where n_exts -= 1; self.write(&mut *writer, (sinfo, n_exts != 0))?; } - if ext_consolidation != &ext::ConsolidationType::default() { - n_exts -= 1; - self.write(&mut *writer, (*ext_consolidation, n_exts != 0))?; - } if let Some(body) = ext_body.as_ref() { n_exts -= 1; self.write(&mut *writer, (body, n_exts != 0))?; @@ -154,6 +153,11 @@ where } // Body + let mut consolidation = Consolidation::default(); + if imsg::has_flag(self.header, flag::C) { + consolidation = self.codec.read(&mut *reader)?; + } + let mut parameters = String::new(); if imsg::has_flag(self.header, flag::P) { parameters = self.codec.read(&mut *reader)?; @@ -161,7 +165,6 @@ where // Extensions let mut ext_sinfo: Option = None; - let mut ext_consolidation = ext::ConsolidationType::default(); let mut ext_body: Option = None; let mut ext_attachment: Option = None; let mut ext_unknown = Vec::new(); @@ -176,11 +179,6 @@ where ext_sinfo = Some(s); has_ext = ext; } - ext::Consolidation::ID => { - let (c, ext): (ext::ConsolidationType, bool) = eodec.read(&mut *reader)?; - ext_consolidation = c; - has_ext = ext; - } ext::QueryBodyType::SID | ext::QueryBodyType::VID => { let (s, ext): (ext::QueryBodyType, bool) = eodec.read(&mut *reader)?; ext_body = Some(s); @@ -200,9 +198,9 @@ where } Ok(Query { + consolidation, parameters, ext_sinfo, - ext_consolidation, ext_body, ext_attachment, ext_unknown, diff --git a/commons/zenoh-codec/src/zenoh/reply.rs b/commons/zenoh-codec/src/zenoh/reply.rs index d98c72b341..d54e98cc5e 100644 --- a/commons/zenoh-codec/src/zenoh/reply.rs +++ b/commons/zenoh-codec/src/zenoh/reply.rs @@ -11,23 +11,18 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(not(feature = "shared-memory"))] -use crate::Zenoh080Bounded; -#[cfg(feature = "shared-memory")] -use crate::Zenoh080Sliced; use crate::{common::extension, RCodec, WCodec, 
Zenoh080, Zenoh080Header}; use alloc::vec::Vec; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, - ZBuf, }; use zenoh_protocol::{ - common::{iext, imsg}, - core::Encoding, + common::imsg, zenoh::{ id, - reply::{ext, flag, Reply}, + query::Consolidation, + reply::{flag, Reply, ReplyBody}, }, }; @@ -39,81 +34,35 @@ where fn write(self, writer: &mut W, x: &Reply) -> Self::Output { let Reply { - timestamp, - encoding, - ext_sinfo, - ext_consolidation, - #[cfg(feature = "shared-memory")] - ext_shm, - ext_attachment, + consolidation, ext_unknown, payload, } = x; // Header let mut header = id::REPLY; - if timestamp.is_some() { - header |= flag::T; - } - if encoding != &Encoding::default() { - header |= flag::E; - } - let mut n_exts = (ext_sinfo.is_some()) as u8 - + ((ext_consolidation != &ext::ConsolidationType::default()) as u8) - + (ext_attachment.is_some()) as u8 - + (ext_unknown.len() as u8); - #[cfg(feature = "shared-memory")] - { - n_exts += ext_shm.is_some() as u8; + if consolidation != &Consolidation::default() { + header |= flag::C; } + let mut n_exts = ext_unknown.len() as u8; if n_exts != 0 { header |= flag::Z; } self.write(&mut *writer, header)?; // Body - if let Some(ts) = timestamp.as_ref() { - self.write(&mut *writer, ts)?; - } - if encoding != &Encoding::default() { - self.write(&mut *writer, encoding)?; + if consolidation != &Consolidation::default() { + self.write(&mut *writer, *consolidation)?; } // Extensions - if let Some(sinfo) = ext_sinfo.as_ref() { - n_exts -= 1; - self.write(&mut *writer, (sinfo, n_exts != 0))?; - } - if ext_consolidation != &ext::ConsolidationType::default() { - n_exts -= 1; - self.write(&mut *writer, (*ext_consolidation, n_exts != 0))?; - } - #[cfg(feature = "shared-memory")] - if let Some(eshm) = ext_shm.as_ref() { - n_exts -= 1; - self.write(&mut *writer, (eshm, n_exts != 0))?; - } - if let Some(att) = ext_attachment.as_ref() { - n_exts -= 1; - self.write(&mut *writer, (att, n_exts != 0))?; - } for u in ext_unknown.iter() { n_exts -= 1; self.write(&mut *writer, (u, n_exts != 0))?; } // Payload - #[cfg(feature = "shared-memory")] - { - let codec = Zenoh080Sliced::::new(ext_shm.is_some()); - codec.write(&mut *writer, payload)?; - } - - #[cfg(not(feature = "shared-memory"))] - { - let bodec = Zenoh080Bounded::::new(); - bodec.write(&mut *writer, payload)?; - } + self.write(&mut *writer, payload)?; Ok(()) } @@ -144,81 +93,27 @@ where } // Body - let mut timestamp: Option = None; - if imsg::has_flag(self.header, flag::T) { - timestamp = Some(self.codec.read(&mut *reader)?); - } - - let mut encoding = Encoding::default(); - if imsg::has_flag(self.header, flag::E) { - encoding = self.codec.read(&mut *reader)?; + let mut consolidation = Consolidation::default(); + if imsg::has_flag(self.header, flag::C) { + consolidation = self.codec.read(&mut *reader)?; } // Extensions - let mut ext_sinfo: Option = None; - let mut ext_consolidation = ext::ConsolidationType::default(); - #[cfg(feature = "shared-memory")] - let mut ext_shm: Option = None; - let mut ext_attachment: Option = None; let mut ext_unknown = Vec::new(); let mut has_ext = imsg::has_flag(self.header, flag::Z); while has_ext { let ext: u8 = self.codec.read(&mut *reader)?; - let eodec = Zenoh080Header::new(ext); - match iext::eid(ext) { - ext::SourceInfo::ID => { - let (s, ext): (ext::SourceInfoType, bool) = eodec.read(&mut *reader)?; - ext_sinfo = Some(s); - has_ext = ext; - } - ext::Consolidation::ID => { - let (c, ext): (ext::ConsolidationType, bool) = eodec.read(&mut 
*reader)?; - ext_consolidation = c; - has_ext = ext; - } - #[cfg(feature = "shared-memory")] - ext::Shm::ID => { - let (s, ext): (ext::ShmType, bool) = eodec.read(&mut *reader)?; - ext_shm = Some(s); - has_ext = ext; - } - ext::Attachment::ID => { - let (a, ext): (ext::AttachmentType, bool) = eodec.read(&mut *reader)?; - ext_attachment = Some(a); - has_ext = ext; - } - _ => { - let (u, ext) = extension::read(reader, "Reply", ext)?; - ext_unknown.push(u); - has_ext = ext; - } - } + let (u, ext) = extension::read(reader, "Reply", ext)?; + ext_unknown.push(u); + has_ext = ext; } // Payload - let payload: ZBuf = { - #[cfg(feature = "shared-memory")] - { - let codec = Zenoh080Sliced::::new(ext_shm.is_some()); - codec.read(&mut *reader)? - } - - #[cfg(not(feature = "shared-memory"))] - { - let bodec = Zenoh080Bounded::::new(); - bodec.read(&mut *reader)? - } - }; + let payload: ReplyBody = self.codec.read(&mut *reader)?; Ok(Reply { - timestamp, - encoding, - ext_sinfo, - ext_consolidation, - #[cfg(feature = "shared-memory")] - ext_shm, - ext_attachment, + consolidation, ext_unknown, payload, }) diff --git a/commons/zenoh-codec/tests/codec.rs b/commons/zenoh-codec/tests/codec.rs index 3fdb95e1b5..28201c1977 100644 --- a/commons/zenoh-codec/tests/codec.rs +++ b/commons/zenoh-codec/tests/codec.rs @@ -556,7 +556,7 @@ fn codec_network() { run!(NetworkMessage, NetworkMessage::rand()); } -// Zenoh new +// Zenoh #[test] fn codec_put() { run!(zenoh::Put, zenoh::Put::rand()); diff --git a/commons/zenoh-protocol/src/lib.rs b/commons/zenoh-protocol/src/lib.rs index 2e1a2fa7cf..8d26f52ed9 100644 --- a/commons/zenoh-protocol/src/lib.rs +++ b/commons/zenoh-protocol/src/lib.rs @@ -28,7 +28,7 @@ pub mod transport; pub mod zenoh; // Zenoh version -pub const VERSION: u8 = 0x08; +pub const VERSION: u8 = 0x09; // Zenoh protocol uses the following conventions for message definition and representation. 
// diff --git a/commons/zenoh-protocol/src/zenoh/mod.rs b/commons/zenoh-protocol/src/zenoh/mod.rs index e67576e673..a23eaa9b21 100644 --- a/commons/zenoh-protocol/src/zenoh/mod.rs +++ b/commons/zenoh-protocol/src/zenoh/mod.rs @@ -95,10 +95,11 @@ impl RequestBody { let mut rng = rand::thread_rng(); - match rng.gen_range(0..3) { + match rng.gen_range(0..4) { 0 => RequestBody::Query(Query::rand()), 1 => RequestBody::Put(Put::rand()), 2 => RequestBody::Del(Del::rand()), + 3 => RequestBody::Pull(Pull::rand()), _ => unreachable!(), } } @@ -126,8 +127,8 @@ impl From for RequestBody { #[derive(Debug, Clone, PartialEq, Eq)] pub enum ResponseBody { Reply(Reply), - Err(Err), Ack(Ack), + Err(Err), Put(Put), } @@ -135,13 +136,12 @@ impl ResponseBody { #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; - let mut rng = rand::thread_rng(); match rng.gen_range(0..4) { 0 => ResponseBody::Reply(Reply::rand()), - 1 => ResponseBody::Err(Err::rand()), - 2 => ResponseBody::Ack(Ack::rand()), + 1 => ResponseBody::Ack(Ack::rand()), + 2 => ResponseBody::Err(Err::rand()), 3 => ResponseBody::Put(Put::rand()), _ => unreachable!(), } diff --git a/commons/zenoh-protocol/src/zenoh/query.rs b/commons/zenoh-protocol/src/zenoh/query.rs index 7432840492..17dfa23df8 100644 --- a/commons/zenoh-protocol/src/zenoh/query.rs +++ b/commons/zenoh-protocol/src/zenoh/query.rs @@ -69,50 +69,45 @@ impl From for Consolidation { /// /// ```text /// Flags: +/// - C: Consolidation if C==1 then consolidation is present /// - P: Parameters If P==1 then the parameters are present -/// - X: Reserved /// - Z: Extension If Z==1 then at least one extension is present /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ -/// |Z|X|P| QUERY | +/// |Z|P|C| QUERY | /// +-+-+-+---------+ +/// % consolidation % if C==1 +/// +---------------+ /// ~ ps: ~ if P==1 /// +---------------+ /// ~ [qry_exts] ~ if Z==1 /// +---------------+ /// ``` pub mod flag { - pub const P: u8 = 1 << 5; // 0x20 Parameters if P==1 then the parameters are present - // pub const X: u8 = 1 << 6; // 0x40 Reserved + pub const C: u8 = 1 << 5; // 0x20 Consolidation if C==1 then consolidation is present + pub const P: u8 = 1 << 6; // 0x40 Parameters if P==1 then the parameters are present pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow } #[derive(Debug, Clone, PartialEq, Eq)] pub struct Query { + pub consolidation: Consolidation, pub parameters: String, pub ext_sinfo: Option, - pub ext_consolidation: Consolidation, pub ext_body: Option, pub ext_attachment: Option, pub ext_unknown: Vec, } pub mod ext { - use crate::{ - common::{ZExtZ64, ZExtZBuf}, - zextz64, zextzbuf, - }; + use crate::{common::ZExtZBuf, zextzbuf}; /// # SourceInfo extension /// Used to carry additional information about the source of data pub type SourceInfo = zextzbuf!(0x1, false); pub type SourceInfoType = crate::zenoh::ext::SourceInfoType<{ SourceInfo::ID }>; - /// # Consolidation extension - pub type Consolidation = zextz64!(0x2, true); - pub type ConsolidationType = crate::zenoh::query::Consolidation; - /// # QueryBody extension /// Used to carry a body attached to the query /// Shared Memory extension is automatically defined by ValueType extension if @@ -137,6 +132,7 @@ impl Query { const MIN: usize = 2; const MAX: usize = 16; + let consolidation = Consolidation::rand(); let parameters: String = if rng.gen_bool(0.5) { let len = rng.gen_range(MIN..MAX); Alphanumeric.sample_string(&mut rng, len) @@ -144,7 +140,6 @@ impl Query { String::new() }; let ext_sinfo = 
rng.gen_bool(0.5).then_some(ext::SourceInfoType::rand()); - let ext_consolidation = Consolidation::rand(); let ext_body = rng.gen_bool(0.5).then_some(ext::QueryBodyType::rand()); let ext_attachment = rng.gen_bool(0.5).then_some(ext::AttachmentType::rand()); let mut ext_unknown = Vec::new(); @@ -156,9 +151,9 @@ impl Query { } Self { + consolidation, parameters, ext_sinfo, - ext_consolidation, ext_body, ext_attachment, ext_unknown, diff --git a/commons/zenoh-protocol/src/zenoh/reply.rs b/commons/zenoh-protocol/src/zenoh/reply.rs index 2395e1e9b2..7cbab4ca0a 100644 --- a/commons/zenoh-protocol/src/zenoh/reply.rs +++ b/commons/zenoh-protocol/src/zenoh/reply.rs @@ -11,115 +11,61 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::ZExtUnknown, core::Encoding}; +use crate::{ + common::ZExtUnknown, + zenoh::{query::Consolidation, PushBody}, +}; use alloc::vec::Vec; -use uhlc::Timestamp; -use zenoh_buffers::ZBuf; /// # Reply message /// /// ```text /// Flags: -/// - T: Timestamp If T==1 then the timestamp if present -/// - E: Encoding If E==1 then the encoding is present +/// - C: Consolidation if C==1 then consolidation is present +/// - X: Reserved /// - Z: Extension If Z==1 then at least one extension is present /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ -/// |Z|E|T| REPLY | +/// |Z|X|C| REPLY | /// +-+-+-+---------+ -/// ~ ts: ~ if T==1 -/// +---------------+ -/// ~ encoding ~ if E==1 +/// % consolidation % if C==1 /// +---------------+ /// ~ [repl_exts] ~ if Z==1 /// +---------------+ -/// ~ pl: ~ -- Payload +/// ~ ReplyBody ~ -- Payload /// +---------------+ /// ``` pub mod flag { - pub const T: u8 = 1 << 5; // 0x20 Timestamp if T==0 then the timestamp if present - pub const E: u8 = 1 << 6; // 0x40 Encoding if E==1 then the encoding is present + pub const C: u8 = 1 << 5; // 0x20 Consolidation if C==1 then consolidation is present + // pub const X: u8 = 1 << 6; // 0x40 Reserved pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow } #[derive(Debug, Clone, PartialEq, Eq)] pub struct Reply { - pub timestamp: Option, - pub encoding: Encoding, - pub ext_sinfo: Option, - pub ext_consolidation: ext::ConsolidationType, - #[cfg(feature = "shared-memory")] - pub ext_shm: Option, - pub ext_attachment: Option, + pub consolidation: Consolidation, pub ext_unknown: Vec, - pub payload: ZBuf, + pub payload: ReplyBody, } -pub mod ext { - #[cfg(feature = "shared-memory")] - use crate::{common::ZExtUnit, zextunit}; - use crate::{ - common::{ZExtZ64, ZExtZBuf}, - zextz64, zextzbuf, - }; - - /// # SourceInfo extension - /// Used to carry additional information about the source of data - pub type SourceInfo = zextzbuf!(0x1, false); - pub type SourceInfoType = crate::zenoh::ext::SourceInfoType<{ SourceInfo::ID }>; - - /// # Consolidation extension - pub type Consolidation = zextz64!(0x2, true); - pub type ConsolidationType = crate::zenoh::query::ext::ConsolidationType; - - /// # Shared Memory extension - /// Used to carry additional information about the shared-memory layour of data - #[cfg(feature = "shared-memory")] - pub type Shm = zextunit!(0x3, true); - #[cfg(feature = "shared-memory")] - pub type ShmType = crate::zenoh::ext::ShmType<{ Shm::ID }>; - - /// # User attachment - pub type Attachment = zextzbuf!(0x4, false); - pub type AttachmentType = crate::zenoh::ext::AttachmentType<{ Attachment::ID }>; -} +pub type ReplyBody = PushBody; impl Reply { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::{common::iext, core::ZenohId, zenoh::Consolidation}; 
use rand::Rng; let mut rng = rand::thread_rng(); - let timestamp = rng.gen_bool(0.5).then_some({ - let time = uhlc::NTP64(rng.gen()); - let id = uhlc::ID::try_from(ZenohId::rand().to_le_bytes()).unwrap(); - Timestamp::new(time, id) - }); - let encoding = Encoding::rand(); - let ext_sinfo = rng.gen_bool(0.5).then_some(ext::SourceInfoType::rand()); - let ext_consolidation = Consolidation::rand(); - #[cfg(feature = "shared-memory")] - let ext_shm = rng.gen_bool(0.5).then_some(ext::ShmType::rand()); - let ext_attachment = rng.gen_bool(0.5).then_some(ext::AttachmentType::rand()); + let payload = ReplyBody::rand(); + let consolidation = Consolidation::rand(); let mut ext_unknown = Vec::new(); for _ in 0..rng.gen_range(0..4) { - ext_unknown.push(ZExtUnknown::rand2( - iext::mid(ext::Attachment::ID) + 1, - false, - )); + ext_unknown.push(ZExtUnknown::rand2(1, false)); } - let payload = ZBuf::rand(rng.gen_range(1..=64)); Self { - timestamp, - encoding, - ext_sinfo, - ext_consolidation, - #[cfg(feature = "shared-memory")] - ext_shm, - ext_attachment, + consolidation, ext_unknown, payload, } diff --git a/io/zenoh-transport/src/common/stats.rs b/io/zenoh-transport/src/common/stats.rs index f095a58273..aaf39641c0 100644 --- a/io/zenoh-transport/src/common/stats.rs +++ b/io/zenoh-transport/src/common/stats.rs @@ -208,6 +208,10 @@ stats_struct! { # TYPE "counter" pub tx_z_del_msgs DiscriminatedStats, + # HELP "Counter of received bytes in zenoh del message attachments." + # TYPE "counter" + pub tx_z_del_pl_bytes DiscriminatedStats, + # HELP "Counter of sent zenoh query messages." # TYPE "counter" pub tx_z_query_msgs DiscriminatedStats, @@ -252,6 +256,10 @@ stats_struct! { # TYPE "counter" pub rx_z_del_msgs DiscriminatedStats, + # HELP "Counter of received bytes in zenoh del message attachments." + # TYPE "counter" + pub rx_z_del_pl_bytes DiscriminatedStats, + # HELP "Counter of received zenoh query messages." # TYPE "counter" pub rx_z_query_msgs DiscriminatedStats, diff --git a/io/zenoh-transport/src/shm.rs b/io/zenoh-transport/src/shm.rs index 04a8f502c4..8b0e93f494 100644 --- a/io/zenoh-transport/src/shm.rs +++ b/io/zenoh-transport/src/shm.rs @@ -21,6 +21,7 @@ use zenoh_protocol::{ err::{ext::ErrBodyType, Err}, ext::ShmType, query::{ext::QueryBodyType, Query}, + reply::ReplyBody, PushBody, Put, Reply, RequestBody, ResponseBody, }, }; @@ -105,17 +106,17 @@ impl MapShm for Query { // Impl - Reply impl MapShm for Reply { fn map_to_shminfo(&mut self) -> ZResult { - let Self { - payload, ext_shm, .. - } = self; - map_to_shminfo!(payload, ext_shm) + match &mut self.payload { + ReplyBody::Put(b) => b.map_to_shminfo(), + _ => Ok(false), + } } fn map_to_shmbuf(&mut self, shmr: &RwLock) -> ZResult { - let Self { - payload, ext_shm, .. - } = self; - map_to_shmbuf!(payload, ext_shm, shmr) + match &mut self.payload { + ReplyBody::Put(b) => b.map_to_shmbuf(shmr), + _ => Ok(false), + } } } diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index da6ae0c371..ffe2d3ccca 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -413,10 +413,19 @@ macro_rules! 
inc_stats { match &$body { PushBody::Put(p) => { stats.[<$txrx _z_put_msgs>].[](1); - stats.[<$txrx _z_put_pl_bytes>].[](p.payload.len()); + let mut n = p.payload.len(); + if let Some(a) = p.ext_attachment.as_ref() { + n += a.buffer.len(); + } + stats.[<$txrx _z_put_pl_bytes>].[](n); } - PushBody::Del(_) => { + PushBody::Del(d) => { stats.[<$txrx _z_del_msgs>].[](1); + let mut n = 0; + if let Some(a) = d.ext_attachment.as_ref() { + n += a.buffer.len(); + } + stats.[<$txrx _z_del_pl_bytes>].[](n); } } } diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 9645af0f74..a6748650ab 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -21,16 +21,16 @@ use async_trait::async_trait; use std::collections::HashMap; use std::sync::{Arc, Weak}; use zenoh_config::WhatAmI; -use zenoh_protocol::core::key_expr::keyexpr; -use zenoh_protocol::network::declare::queryable::ext::QueryableInfo; +use zenoh_protocol::zenoh::reply::ReplyBody; +use zenoh_protocol::zenoh::Put; use zenoh_protocol::{ - core::{Encoding, WireExpr}, + core::{key_expr::keyexpr, Encoding, WireExpr}, network::{ - declare::ext, + declare::{ext, queryable::ext::QueryableInfo}, request::{ext::TargetType, Request, RequestId}, response::{self, ext::ResponderIdType, Response, ResponseFinal}, }, - zenoh::{reply::ext::ConsolidationType, Reply, RequestBody, ResponseBody}, + zenoh::{query::Consolidation, Reply, RequestBody, ResponseBody}, }; use zenoh_sync::get_mut_unchecked; use zenoh_util::Timed; @@ -464,11 +464,29 @@ macro_rules! inc_res_stats { match &$body { ResponseBody::Put(p) => { stats.[<$txrx _z_put_msgs>].[](1); - stats.[<$txrx _z_put_pl_bytes>].[](p.payload.len()); + let mut n = p.payload.len(); + if let Some(a) = p.ext_attachment.as_ref() { + n += a.buffer.len(); + } + stats.[<$txrx _z_put_pl_bytes>].[](n); } ResponseBody::Reply(r) => { stats.[<$txrx _z_reply_msgs>].[](1); - stats.[<$txrx _z_reply_pl_bytes>].[](r.payload.len()); + let mut n = 0; + match &r.payload { + ReplyBody::Put(p) => { + if let Some(a) = p.ext_attachment.as_ref() { + n += a.buffer.len(); + } + n += p.payload.len(); + } + ReplyBody::Del(d) => { + if let Some(a) = d.ext_attachment.as_ref() { + n += a.buffer.len(); + } + } + } + stats.[<$txrx _z_reply_pl_bytes>].[](n); } ResponseBody::Err(e) => { stats.[<$txrx _z_reply_msgs>].[](1); @@ -537,15 +555,19 @@ pub fn route_query( for (wexpr, payload) in local_replies { let payload = ResponseBody::Reply(Reply { - timestamp: None, - encoding: Encoding::default(), - ext_sinfo: None, - ext_consolidation: ConsolidationType::default(), - #[cfg(feature = "shared-memory")] - ext_shm: None, - ext_attachment: None, // @TODO: expose it in the API - ext_unknown: vec![], - payload, + consolidation: Consolidation::default(), // @TODO: handle Del case + ext_unknown: vec![], // @TODO: handle unknown extensions + payload: ReplyBody::Put(Put { + // @TODO: handle Del case + timestamp: None, // @TODO: handle timestamp + encoding: Encoding::default(), // @TODO: handle encoding + ext_sinfo: None, // @TODO: handle source info + ext_attachment: None, // @TODO: expose it in the API + #[cfg(feature = "shared-memory")] + ext_shm: None, + ext_unknown: vec![], // @TODO: handle unknown extensions + payload, + }), }); #[cfg(feature = "stats")] if !admin { diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 9ee73d1641..4e9f4914dd 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -30,11 +30,11 @@ use 
std::future::Ready; use std::ops::Deref; use std::sync::Arc; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; -use zenoh_protocol::core::WireExpr; -use zenoh_protocol::network::{response, Mapping, RequestId, Response, ResponseFinal}; -use zenoh_protocol::zenoh::ext::ValueType; -use zenoh_protocol::zenoh::reply::ext::ConsolidationType; -use zenoh_protocol::zenoh::{self, ResponseBody}; +use zenoh_protocol::{ + core::WireExpr, + network::{response, Mapping, RequestId, Response, ResponseFinal}, + zenoh::{self, ext::ValueType, reply::ReplyBody, Del, Put, ResponseBody}, +}; use zenoh_result::ZResult; pub(crate) struct QueryInner { @@ -206,16 +206,33 @@ impl SyncResolve for ReplyBuilder<'_> { source_id: None, source_sn: None, }; - #[allow(unused_mut)] - let mut ext_attachment = None; - #[cfg(feature = "unstable")] - { - data_info.source_id = source_info.source_id; - data_info.source_sn = source_info.source_sn; - if let Some(attachment) = attachment { - ext_attachment = Some(attachment.into()); - } + + // Use a macro for inferring the proper const extension ID between Put and Del cases + macro_rules! ext_attachment { + () => {{ + #[allow(unused_mut)] + let mut ext_attachment = None; + #[cfg(feature = "unstable")] + { + data_info.source_id = source_info.source_id; + data_info.source_sn = source_info.source_sn; + if let Some(attachment) = attachment { + ext_attachment = Some(attachment.into()); + } + } + ext_attachment + }}; } + + let ext_sinfo = if data_info.source_id.is_some() || data_info.source_sn.is_some() { + Some(zenoh::put::ext::SourceInfoType { + zid: data_info.source_id.unwrap_or_default(), + eid: 0, // @TODO use proper EntityId (#703) + sn: data_info.source_sn.unwrap_or_default() as u32, + }) + } else { + None + }; self.query.inner.primitives.send_response(Response { rid: self.query.inner.qid, wire_expr: WireExpr { @@ -224,24 +241,26 @@ impl SyncResolve for ReplyBuilder<'_> { mapping: Mapping::Sender, }, payload: ResponseBody::Reply(zenoh::Reply { - timestamp: data_info.timestamp, - encoding: data_info.encoding.unwrap_or_default(), - ext_sinfo: if data_info.source_id.is_some() || data_info.source_sn.is_some() - { - Some(zenoh::reply::ext::SourceInfoType { - zid: data_info.source_id.unwrap_or_default(), - eid: 0, // @TODO use proper EntityId (#703) - sn: data_info.source_sn.unwrap_or_default() as u32, - }) - } else { - None - }, - ext_consolidation: ConsolidationType::default(), - #[cfg(feature = "shared-memory")] - ext_shm: None, - ext_attachment, + consolidation: zenoh::Consolidation::default(), ext_unknown: vec![], - payload, + payload: match kind { + SampleKind::Put => ReplyBody::Put(Put { + timestamp: data_info.timestamp, + encoding: data_info.encoding.unwrap_or_default(), + ext_sinfo, + #[cfg(feature = "shared-memory")] + ext_shm: None, + ext_attachment: ext_attachment!(), + ext_unknown: vec![], + payload, + }), + SampleKind::Delete => ReplyBody::Del(Del { + timestamp, + ext_sinfo, + ext_attachment: ext_attachment!(), + ext_unknown: vec![], + }), + }, }), ext_qos: response::ext::QoSType::response_default(), ext_tstamp: None, diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index d52c446d3d..46cfd5e499 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -57,6 +57,9 @@ use zenoh_config::unwrap_or_default; use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; use zenoh_protocol::network::AtomicRequestId; use zenoh_protocol::network::RequestId; +use zenoh_protocol::zenoh::reply::ReplyBody; +use zenoh_protocol::zenoh::Del; 
+use zenoh_protocol::zenoh::Put; use zenoh_protocol::{ core::{ key_expr::{keyexpr, OwnedKeyExpr}, @@ -73,10 +76,7 @@ use zenoh_protocol::{ Mapping, Push, Response, ResponseFinal, }, zenoh::{ - query::{ - self, - ext::{ConsolidationType, QueryBodyType}, - }, + query::{self, ext::QueryBodyType, Consolidation}, Pull, PushBody, RequestBody, ResponseBody, }, }; @@ -1808,9 +1808,9 @@ impl Session { ext_budget: None, ext_timeout: Some(timeout), payload: RequestBody::Query(zenoh_protocol::zenoh::Query { + consolidation: consolidation.into(), parameters: selector.parameters().to_string(), ext_sinfo: None, - ext_consolidation: consolidation.into(), ext_body: value.as_ref().map(|v| query::ext::QueryBodyType { #[cfg(feature = "shared-memory")] ext_shm: None, @@ -1851,7 +1851,7 @@ impl Session { parameters: &str, qid: RequestId, _target: TargetType, - _consolidation: ConsolidationType, + _consolidation: Consolidation, body: Option, #[cfg(feature = "unstable")] attachment: Option, ) { @@ -2233,7 +2233,7 @@ impl Primitives for Session { &m.parameters, msg.id, msg.ext_target, - m.ext_consolidation, + m.consolidation, m.ext_body, #[cfg(feature = "unstable")] m.ext_attachment.map(Into::into), @@ -2341,19 +2341,63 @@ impl Primitives for Session { } None => key_expr, }; - let info = DataInfo { - kind: SampleKind::Put, - encoding: Some(m.encoding), - timestamp: m.timestamp, - source_id: m.ext_sinfo.as_ref().map(|i| i.zid), - source_sn: m.ext_sinfo.as_ref().map(|i| i.sn as u64), + + struct Ret { + payload: ZBuf, + info: DataInfo, + #[cfg(feature = "unstable")] + attachment: Option, + } + let Ret { + payload, + info, + #[cfg(feature = "unstable")] + attachment, + } = match m.payload { + ReplyBody::Put(Put { + timestamp, + encoding, + ext_sinfo, + ext_attachment: _attachment, + payload, + .. + }) => Ret { + payload, + info: DataInfo { + kind: SampleKind::Put, + encoding: Some(encoding), + timestamp, + source_id: ext_sinfo.as_ref().map(|i| i.zid), + source_sn: ext_sinfo.as_ref().map(|i| i.sn as u64), + }, + #[cfg(feature = "unstable")] + attachment: _attachment.map(Into::into), + }, + ReplyBody::Del(Del { + timestamp, + ext_sinfo, + ext_attachment: _attachment, + .. 
+ }) => Ret { + payload: ZBuf::empty(), + info: DataInfo { + kind: SampleKind::Delete, + encoding: None, + timestamp, + source_id: ext_sinfo.as_ref().map(|i| i.zid), + source_sn: ext_sinfo.as_ref().map(|i| i.sn as u64), + }, + #[cfg(feature = "unstable")] + attachment: _attachment.map(Into::into), + }, }; + #[allow(unused_mut)] let mut sample = - Sample::with_info(key_expr.into_owned(), m.payload, Some(info)); + Sample::with_info(key_expr.into_owned(), payload, Some(info)); #[cfg(feature = "unstable")] { - sample.attachment = m.ext_attachment.map(Into::into); + sample.attachment = attachment; } let new_reply = Reply { sample: Ok(sample), diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index c2cec7c627..f727ad60c3 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -153,10 +153,31 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re let c_msgs = msgs.clone(); let qbl = ztimeout!(peer01 .declare_queryable(key_expr) - .callback(move |sample| { + .callback(move |query| { c_msgs.fetch_add(1, Ordering::Relaxed); - let rep = Sample::try_from(key_expr, vec![0u8; size]).unwrap(); - task::block_on(async { ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() }); + match query.parameters() { + "ok_put" => { + let mut rep = Sample::try_from(key_expr, vec![0u8; size]).unwrap(); + rep.kind = SampleKind::Put; + task::block_on(async { + ztimeout!(query.reply(Ok(rep)).res_async()).unwrap() + }); + } + "ok_del" => { + let mut rep = Sample::try_from(key_expr, vec![0u8; size]).unwrap(); + rep.kind = SampleKind::Delete; + task::block_on(async { + ztimeout!(query.reply(Ok(rep)).res_async()).unwrap() + }); + } + "err" => { + let rep = Value::from(vec![0u8; size]); + task::block_on(async { + ztimeout!(query.reply(Err(rep)).res_async()).unwrap() + }); + } + _ => panic!("Unknown query parameter"), + } }) .res_async()) .unwrap(); @@ -165,12 +186,15 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re task::sleep(SLEEP).await; // Get data - println!("[QR][02c] Getting on peer02 session. {msg_count} msgs."); + println!("[QR][02c] Getting Ok(Put) on peer02 session. {msg_count} msgs."); let mut cnt = 0; for _ in 0..msg_count { - let rs = ztimeout!(peer02.get(key_expr).res_async()).unwrap(); + let selector = format!("{}?ok_put", key_expr); + let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { - assert_eq!(s.sample.unwrap().value.payload.len(), size); + let s = s.sample.unwrap(); + assert_eq!(s.kind, SampleKind::Put); + assert_eq!(s.value.payload.len(), size); cnt += 1; } } @@ -178,6 +202,41 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re assert_eq!(msgs.load(Ordering::Relaxed), msg_count); assert_eq!(cnt, msg_count); + msgs.store(0, Ordering::Relaxed); + + println!("[QR][03c] Getting Ok(Delete) on peer02 session. {msg_count} msgs."); + let mut cnt = 0; + for _ in 0..msg_count { + let selector = format!("{}?ok_del", key_expr); + let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); + while let Ok(s) = ztimeout!(rs.recv_async()) { + let s = s.sample.unwrap(); + assert_eq!(s.kind, SampleKind::Delete); + assert_eq!(s.value.payload.len(), 0); + cnt += 1; + } + } + println!("[QR][03c] Got on peer02 session. {cnt}/{msg_count} msgs."); + assert_eq!(msgs.load(Ordering::Relaxed), msg_count); + assert_eq!(cnt, msg_count); + + msgs.store(0, Ordering::Relaxed); + + println!("[QR][04c] Getting Err() on peer02 session. 
{msg_count} msgs."); + let mut cnt = 0; + for _ in 0..msg_count { + let selector = format!("{}?err", key_expr); + let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); + while let Ok(s) = ztimeout!(rs.recv_async()) { + let e = s.sample.unwrap_err(); + assert_eq!(e.payload.len(), size); + cnt += 1; + } + } + println!("[QR][04c] Got on peer02 session. {cnt}/{msg_count} msgs."); + assert_eq!(msgs.load(Ordering::Relaxed), msg_count); + assert_eq!(cnt, msg_count); + println!("[PS][03c] Unqueryable on peer01 session"); ztimeout!(qbl.undeclare().res_async()).unwrap(); From d6ffebf080958157ac141c92b51b9fe00c075227 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 15 Feb 2024 11:58:21 +0100 Subject: [PATCH 002/357] Clean-up of protocol types (#729) * Update Reply protocol definition and codec * Make consolidation a flag in Query/Reply * Fix wrong Consolidation cast in codec * Apply Reply changes to routing * Fix shared-memory feature * Fix stats * Bump Zenoh Protocol Version * Add query/reply ok(put|del)/err() tests * Clean-up of code * Default CongestionControl for Push is Drop * Fix Priority::DEFAULT typo * Define DEFAULT consts * ConsolidationMode moved into the API * Remove unused Ack message * Fix Ack leftovers * CongestionControl::DEFAULT * QoSType::DEFAULT * Mapping::DEFAULT * Encoding::DEFAULT * QueryTarget::DEFAULT * NodeType::DEFAULT * QueryableInfo::DEFAULT * Remove ConsolidationMode from zenoh-protocol * ConsolidationType::DEFAULT * Remove dead code * Remove dead code * Move SampleKind to sample.rs * Cleanup SubMode * Cleanup QueryTarget * Remove emptyline --- commons/zenoh-codec/benches/codec.rs | 48 +++---- commons/zenoh-codec/src/common/mod.rs | 1 - commons/zenoh-codec/src/common/priority.rs | 66 --------- commons/zenoh-codec/src/core/mod.rs | 1 - commons/zenoh-codec/src/core/property.rs | 84 ----------- commons/zenoh-codec/src/core/wire_expr.rs | 2 +- commons/zenoh-codec/src/network/declare.rs | 32 ++--- commons/zenoh-codec/src/network/mod.rs | 2 +- commons/zenoh-codec/src/network/oam.rs | 7 +- commons/zenoh-codec/src/network/push.rs | 14 +- commons/zenoh-codec/src/network/request.rs | 20 +-- commons/zenoh-codec/src/network/response.rs | 15 +- commons/zenoh-codec/src/transport/fragment.rs | 6 +- commons/zenoh-codec/src/transport/frame.rs | 6 +- commons/zenoh-codec/src/transport/join.rs | 2 +- commons/zenoh-codec/src/transport/oam.rs | 6 +- commons/zenoh-codec/src/zenoh/ack.rs | 129 ----------------- commons/zenoh-codec/src/zenoh/mod.rs | 3 - commons/zenoh-codec/src/zenoh/put.rs | 6 +- commons/zenoh-codec/src/zenoh/query.rs | 6 +- commons/zenoh-codec/src/zenoh/reply.rs | 6 +- commons/zenoh-codec/tests/codec.rs | 5 - commons/zenoh-protocol/src/common/mod.rs | 15 -- commons/zenoh-protocol/src/core/encoding.rs | 2 + commons/zenoh-protocol/src/core/locator.rs | 64 --------- commons/zenoh-protocol/src/core/mod.rs | 98 ++----------- commons/zenoh-protocol/src/core/wire_expr.rs | 2 +- commons/zenoh-protocol/src/network/declare.rs | 28 +++- commons/zenoh-protocol/src/network/mod.rs | 44 +++--- commons/zenoh-protocol/src/network/request.rs | 13 +- commons/zenoh-protocol/src/transport/mod.rs | 12 +- commons/zenoh-protocol/src/zenoh/ack.rs | 84 ----------- commons/zenoh-protocol/src/zenoh/mod.rs | 19 +-- commons/zenoh-protocol/src/zenoh/query.rs | 14 +- examples/examples/z_pub_thr.rs | 2 +- io/zenoh-transport/src/common/batch.rs | 8 +- io/zenoh-transport/src/common/pipeline.rs | 16 +-- io/zenoh-transport/src/multicast/link.rs | 2 +- io/zenoh-transport/src/multicast/rx.rs | 4 +- 
io/zenoh-transport/src/shm.rs | 2 - .../src/unicast/establishment/cookie.rs | 1 - .../src/unicast/establishment/properties.rs | 132 ------------------ .../src/unicast/universal/rx.rs | 4 +- .../tests/multicast_compression.rs | 6 +- .../tests/multicast_transport.rs | 6 +- .../tests/unicast_compression.rs | 12 +- .../tests/unicast_concurrent.rs | 12 +- .../tests/unicast_defragmentation.rs | 16 +-- .../tests/unicast_intermittent.rs | 6 +- .../tests/unicast_priorities.rs | 4 +- io/zenoh-transport/tests/unicast_shm.rs | 12 +- .../tests/unicast_simultaneous.rs | 4 +- io/zenoh-transport/tests/unicast_transport.rs | 56 ++++---- zenoh-ext/src/subscriber_ext.rs | 8 +- zenoh/src/key_expr.rs | 4 +- zenoh/src/liveliness.rs | 6 +- zenoh/src/net/routing/dispatcher/pubsub.rs | 4 +- zenoh/src/net/routing/dispatcher/queries.rs | 29 ++-- zenoh/src/net/routing/dispatcher/resource.rs | 4 +- zenoh/src/net/routing/hat/client/pubsub.rs | 16 +-- zenoh/src/net/routing/hat/client/queries.rs | 12 +- .../net/routing/hat/linkstate_peer/network.rs | 2 +- .../net/routing/hat/linkstate_peer/pubsub.rs | 20 +-- .../net/routing/hat/linkstate_peer/queries.rs | 20 +-- zenoh/src/net/routing/hat/p2p_peer/gossip.rs | 2 +- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 16 +-- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 12 +- zenoh/src/net/routing/hat/router/network.rs | 2 +- zenoh/src/net/routing/hat/router/pubsub.rs | 36 ++--- zenoh/src/net/routing/hat/router/queries.rs | 36 ++--- zenoh/src/net/runtime/adminspace.rs | 14 +- zenoh/src/net/tests/tables.rs | 40 +++--- zenoh/src/prelude.rs | 4 +- zenoh/src/publication.rs | 6 +- zenoh/src/query.rs | 33 ++++- zenoh/src/queryable.rs | 8 +- zenoh/src/sample.rs | 40 +++++- zenoh/src/session.rs | 77 +++++----- zenoh/src/subscriber.rs | 17 --- 79 files changed, 510 insertions(+), 1125 deletions(-) delete mode 100644 commons/zenoh-codec/src/common/priority.rs delete mode 100644 commons/zenoh-codec/src/core/property.rs delete mode 100644 commons/zenoh-codec/src/zenoh/ack.rs delete mode 100644 commons/zenoh-protocol/src/zenoh/ack.rs delete mode 100644 io/zenoh-transport/src/unicast/establishment/properties.rs diff --git a/commons/zenoh-codec/benches/codec.rs b/commons/zenoh-codec/benches/codec.rs index 1c46a700a7..34c9313a7f 100644 --- a/commons/zenoh-codec/benches/codec.rs +++ b/commons/zenoh-codec/benches/codec.rs @@ -75,19 +75,19 @@ fn criterion_benchmark(c: &mut Criterion) { let codec = Zenoh080::new(); let frame = FrameHeader { - reliability: Reliability::default(), + reliability: Reliability::DEFAULT, sn: TransportSn::MIN, - ext_qos: zenoh_protocol::transport::frame::ext::QoSType::default(), + ext_qos: zenoh_protocol::transport::frame::ext::QoSType::DEFAULT, }; let data = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::default(), + ext_qos: ext::QoSType::DEFAULT, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -121,19 +121,19 @@ fn criterion_benchmark(c: &mut Criterion) { let codec = Zenoh080::new(); let frame = FrameHeader { - reliability: Reliability::default(), + reliability: Reliability::DEFAULT, sn: TransportSn::MIN, - ext_qos: zenoh_protocol::transport::frame::ext::QoSType::default(), + ext_qos: zenoh_protocol::transport::frame::ext::QoSType::DEFAULT, }; let data = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::default(), 
+ ext_qos: ext::QoSType::DEFAULT, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -162,19 +162,19 @@ fn criterion_benchmark(c: &mut Criterion) { let codec = Zenoh080::new(); let frame = FrameHeader { - reliability: Reliability::default(), + reliability: Reliability::DEFAULT, sn: TransportSn::MIN, - ext_qos: zenoh_protocol::transport::frame::ext::QoSType::default(), + ext_qos: zenoh_protocol::transport::frame::ext::QoSType::DEFAULT, }; let data = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::default(), + ext_qos: ext::QoSType::DEFAULT, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -210,12 +210,12 @@ fn criterion_benchmark(c: &mut Criterion) { let data = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::default(), + ext_qos: ext::QoSType::DEFAULT, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -238,12 +238,12 @@ fn criterion_benchmark(c: &mut Criterion) { let data = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::default(), + ext_qos: ext::QoSType::DEFAULT, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -277,12 +277,12 @@ fn criterion_benchmark(c: &mut Criterion) { let data = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::default(), + ext_qos: ext::QoSType::DEFAULT, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/commons/zenoh-codec/src/common/mod.rs b/commons/zenoh-codec/src/common/mod.rs index 4c25c93241..f34f9872bf 100644 --- a/commons/zenoh-codec/src/common/mod.rs +++ b/commons/zenoh-codec/src/common/mod.rs @@ -12,4 +12,3 @@ // ZettaScale Zenoh Team, // pub mod extension; -mod priority; diff --git a/commons/zenoh-codec/src/common/priority.rs b/commons/zenoh-codec/src/common/priority.rs deleted file mode 100644 index 776229971e..0000000000 --- a/commons/zenoh-codec/src/common/priority.rs +++ /dev/null @@ -1,66 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
-// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header}; -use core::convert::TryInto; -use zenoh_buffers::{ - reader::{DidntRead, Reader}, - writer::{DidntWrite, Writer}, -}; -use zenoh_protocol::{common::imsg, core::Priority}; - -impl WCodec<&Priority, &mut W> for Zenoh080 -where - W: Writer, -{ - type Output = Result<(), DidntWrite>; - - fn write(self, writer: &mut W, x: &Priority) -> Self::Output { - // Header - let header = imsg::id::PRIORITY | ((*x as u8) << imsg::HEADER_BITS); - self.write(&mut *writer, header)?; - Ok(()) - } -} - -impl RCodec for Zenoh080 -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - let header: u8 = self.read(&mut *reader)?; - let codec = Zenoh080Header::new(header); - - codec.read(reader) - } -} - -impl RCodec for Zenoh080Header -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, _reader: &mut R) -> Result { - if imsg::mid(self.header) != imsg::id::PRIORITY { - return Err(DidntRead); - } - - let priority: Priority = (imsg::flags(self.header) >> imsg::HEADER_BITS) - .try_into() - .map_err(|_| DidntRead)?; - Ok(priority) - } -} diff --git a/commons/zenoh-codec/src/core/mod.rs b/commons/zenoh-codec/src/core/mod.rs index 1f48def695..c8e19f057f 100644 --- a/commons/zenoh-codec/src/core/mod.rs +++ b/commons/zenoh-codec/src/core/mod.rs @@ -13,7 +13,6 @@ // mod encoding; mod locator; -mod property; #[cfg(feature = "shared-memory")] mod shm; mod timestamp; diff --git a/commons/zenoh-codec/src/core/property.rs b/commons/zenoh-codec/src/core/property.rs deleted file mode 100644 index bb7f760208..0000000000 --- a/commons/zenoh-codec/src/core/property.rs +++ /dev/null @@ -1,84 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
-// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use crate::{RCodec, WCodec, Zenoh080}; -use alloc::vec::Vec; -use zenoh_buffers::{ - reader::{DidntRead, Reader}, - writer::{DidntWrite, Writer}, -}; -use zenoh_protocol::core::Property; - -impl WCodec<&Property, &mut W> for Zenoh080 -where - W: Writer, -{ - type Output = Result<(), DidntWrite>; - - fn write(self, writer: &mut W, x: &Property) -> Self::Output { - let Property { key, value } = x; - - self.write(&mut *writer, key)?; - self.write(&mut *writer, value.as_slice())?; - Ok(()) - } -} - -impl RCodec for Zenoh080 -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - let key: u64 = self.read(&mut *reader)?; - let value: Vec = self.read(&mut *reader)?; - - Ok(Property { key, value }) - } -} - -impl WCodec<&[Property], &mut W> for Zenoh080 -where - W: Writer, -{ - type Output = Result<(), DidntWrite>; - - fn write(self, writer: &mut W, x: &[Property]) -> Self::Output { - self.write(&mut *writer, x.len())?; - for p in x.iter() { - self.write(&mut *writer, p)?; - } - - Ok(()) - } -} - -impl RCodec, &mut R> for Zenoh080 -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result, Self::Error> { - let num: usize = self.read(&mut *reader)?; - - let mut ps = Vec::with_capacity(num); - for _ in 0..num { - let p: Property = self.read(&mut *reader)?; - ps.push(p); - } - - Ok(ps) - } -} diff --git a/commons/zenoh-codec/src/core/wire_expr.rs b/commons/zenoh-codec/src/core/wire_expr.rs index 6caba6c8c7..aa6f77b379 100644 --- a/commons/zenoh-codec/src/core/wire_expr.rs +++ b/commons/zenoh-codec/src/core/wire_expr.rs @@ -65,7 +65,7 @@ where Ok(WireExpr { scope, suffix: suffix.into(), - mapping: Mapping::default(), + mapping: Mapping::DEFAULT, }) } } diff --git a/commons/zenoh-codec/src/network/declare.rs b/commons/zenoh-codec/src/network/declare.rs index 20916dc359..cf92b27c17 100644 --- a/commons/zenoh-codec/src/network/declare.rs +++ b/commons/zenoh-codec/src/network/declare.rs @@ -102,16 +102,16 @@ where // Header let mut header = id::DECLARE; - let mut n_exts = ((ext_qos != &declare::ext::QoSType::default()) as u8) + let mut n_exts = ((ext_qos != &declare::ext::QoSType::DEFAULT) as u8) + (ext_tstamp.is_some() as u8) - + ((ext_nodeid != &declare::ext::NodeIdType::default()) as u8); + + ((ext_nodeid != &declare::ext::NodeIdType::DEFAULT) as u8); if n_exts != 0 { header |= declare::flag::Z; } self.write(&mut *writer, header)?; // Extensions - if ext_qos != &declare::ext::QoSType::default() { + if ext_qos != &declare::ext::QoSType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -119,7 +119,7 @@ where n_exts -= 1; self.write(&mut *writer, (ts, n_exts != 0))?; } - if ext_nodeid != &declare::ext::NodeIdType::default() { + if ext_nodeid != &declare::ext::NodeIdType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_nodeid, n_exts != 0))?; } @@ -157,9 +157,9 @@ where } // Extensions - let mut ext_qos = declare::ext::QoSType::default(); + let mut ext_qos = declare::ext::QoSType::DEFAULT; let mut ext_tstamp = None; - let mut ext_nodeid = declare::ext::NodeIdType::default(); + let mut ext_nodeid = declare::ext::NodeIdType::DEFAULT; let mut has_ext = imsg::has_flag(self.header, declare::flag::Z); while has_ext { @@ -340,11 +340,11 @@ where // Header let mut header = declare::id::D_SUBSCRIBER; - let mut n_exts = (ext_info != &subscriber::ext::SubscriberInfo::default()) as u8; + let mut 
n_exts = (ext_info != &subscriber::ext::SubscriberInfo::DEFAULT) as u8; if n_exts != 0 { header |= subscriber::flag::Z; } - if wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::DEFAULT { header |= subscriber::flag::M; } if wire_expr.has_suffix() { @@ -357,7 +357,7 @@ where self.write(&mut *writer, wire_expr)?; // Extensions - if ext_info != &subscriber::ext::SubscriberInfo::default() { + if ext_info != &subscriber::ext::SubscriberInfo::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_info, n_exts != 0))?; } @@ -402,7 +402,7 @@ where }; // Extensions - let mut ext_info = subscriber::ext::SubscriberInfo::default(); + let mut ext_info = subscriber::ext::SubscriberInfo::DEFAULT; let mut has_ext = imsg::has_flag(self.header, subscriber::flag::Z); while has_ext { @@ -524,11 +524,11 @@ where // Header let mut header = declare::id::D_QUERYABLE; - let mut n_exts = (ext_info != &queryable::ext::QueryableInfo::default()) as u8; + let mut n_exts = (ext_info != &queryable::ext::QueryableInfo::DEFAULT) as u8; if n_exts != 0 { header |= subscriber::flag::Z; } - if wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::DEFAULT { header |= subscriber::flag::M; } if wire_expr.has_suffix() { @@ -539,7 +539,7 @@ where // Body self.write(&mut *writer, id)?; self.write(&mut *writer, wire_expr)?; - if ext_info != &queryable::ext::QueryableInfo::default() { + if ext_info != &queryable::ext::QueryableInfo::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_info, n_exts != 0))?; } @@ -584,7 +584,7 @@ where }; // Extensions - let mut ext_info = queryable::ext::QueryableInfo::default(); + let mut ext_info = queryable::ext::QueryableInfo::DEFAULT; let mut has_ext = imsg::has_flag(self.header, queryable::flag::Z); while has_ext { @@ -699,7 +699,7 @@ where // Header let mut header = declare::id::D_TOKEN; - if wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::DEFAULT { header |= subscriber::flag::M; } if wire_expr.has_suffix() { @@ -851,7 +851,7 @@ where // Header let mut header = declare::id::D_INTEREST; - if wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::DEFAULT { header |= subscriber::flag::M; } if wire_expr.has_suffix() { diff --git a/commons/zenoh-codec/src/network/mod.rs b/commons/zenoh-codec/src/network/mod.rs index c1f2489b88..dade13d362 100644 --- a/commons/zenoh-codec/src/network/mod.rs +++ b/commons/zenoh-codec/src/network/mod.rs @@ -58,7 +58,7 @@ where type Error = DidntRead; fn read(self, reader: &mut R) -> Result { - let codec = Zenoh080Reliability::new(Reliability::default()); + let codec = Zenoh080Reliability::new(Reliability::DEFAULT); codec.read(reader) } } diff --git a/commons/zenoh-codec/src/network/oam.rs b/commons/zenoh-codec/src/network/oam.rs index ff6daeb020..9751e9952d 100644 --- a/commons/zenoh-codec/src/network/oam.rs +++ b/commons/zenoh-codec/src/network/oam.rs @@ -52,8 +52,7 @@ where header |= iext::ENC_ZBUF; } } - let mut n_exts = - ((ext_qos != &ext::QoSType::default()) as u8) + (ext_tstamp.is_some() as u8); + let mut n_exts = ((ext_qos != &ext::QoSType::DEFAULT) as u8) + (ext_tstamp.is_some() as u8); if n_exts != 0 { header |= flag::Z; } @@ -63,7 +62,7 @@ where self.write(&mut *writer, id)?; // Extensions - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -115,7 +114,7 @@ where let id: OamId = self.codec.read(&mut *reader)?; // Extensions - let mut ext_qos = 
ext::QoSType::default(); + let mut ext_qos = ext::QoSType::DEFAULT; let mut ext_tstamp = None; let mut has_ext = imsg::has_flag(self.header, flag::Z); diff --git a/commons/zenoh-codec/src/network/push.rs b/commons/zenoh-codec/src/network/push.rs index 10a8489b29..b9ec2ba5db 100644 --- a/commons/zenoh-codec/src/network/push.rs +++ b/commons/zenoh-codec/src/network/push.rs @@ -44,13 +44,13 @@ where // Header let mut header = id::PUSH; - let mut n_exts = ((ext_qos != &ext::QoSType::default()) as u8) + let mut n_exts = ((ext_qos != &ext::QoSType::DEFAULT) as u8) + (ext_tstamp.is_some() as u8) - + ((ext_nodeid != &ext::NodeIdType::default()) as u8); + + ((ext_nodeid != &ext::NodeIdType::DEFAULT) as u8); if n_exts != 0 { header |= flag::Z; } - if wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::DEFAULT { header |= flag::M; } if wire_expr.has_suffix() { @@ -62,7 +62,7 @@ where self.write(&mut *writer, wire_expr)?; // Extensions - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -70,7 +70,7 @@ where n_exts -= 1; self.write(&mut *writer, (ts, n_exts != 0))?; } - if ext_nodeid != &ext::NodeIdType::default() { + if ext_nodeid != &ext::NodeIdType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_nodeid, n_exts != 0))?; } @@ -116,9 +116,9 @@ where }; // Extensions - let mut ext_qos = ext::QoSType::default(); + let mut ext_qos = ext::QoSType::DEFAULT; let mut ext_tstamp = None; - let mut ext_nodeid = ext::NodeIdType::default(); + let mut ext_nodeid = ext::NodeIdType::DEFAULT; let mut has_ext = imsg::has_flag(self.header, flag::Z); while has_ext { diff --git a/commons/zenoh-codec/src/network/request.rs b/commons/zenoh-codec/src/network/request.rs index 19711ff147..364c1af3d0 100644 --- a/commons/zenoh-codec/src/network/request.rs +++ b/commons/zenoh-codec/src/network/request.rs @@ -93,16 +93,16 @@ where // Header let mut header = id::REQUEST; - let mut n_exts = ((ext_qos != &ext::QoSType::default()) as u8) + let mut n_exts = ((ext_qos != &ext::QoSType::DEFAULT) as u8) + (ext_tstamp.is_some() as u8) - + ((ext_target != &ext::TargetType::default()) as u8) + + ((ext_target != &ext::TargetType::DEFAULT) as u8) + (ext_budget.is_some() as u8) + (ext_timeout.is_some() as u8) - + ((ext_nodeid != &ext::NodeIdType::default()) as u8); + + ((ext_nodeid != &ext::NodeIdType::DEFAULT) as u8); if n_exts != 0 { header |= flag::Z; } - if wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::DEFAULT { header |= flag::M; } if wire_expr.has_suffix() { @@ -115,7 +115,7 @@ where self.write(&mut *writer, wire_expr)?; // Extensions - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -123,7 +123,7 @@ where n_exts -= 1; self.write(&mut *writer, (ts, n_exts != 0))?; } - if ext_target != &ext::TargetType::default() { + if ext_target != &ext::TargetType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (ext_target, n_exts != 0))?; } @@ -137,7 +137,7 @@ where let e = ext::Timeout::new(to.as_millis() as u64); self.write(&mut *writer, (&e, n_exts != 0))?; } - if ext_nodeid != &ext::NodeIdType::default() { + if ext_nodeid != &ext::NodeIdType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_nodeid, n_exts != 0))?; } @@ -185,10 +185,10 @@ where }; // Extensions - let mut ext_qos = ext::QoSType::default(); + let mut ext_qos = ext::QoSType::DEFAULT; let mut ext_tstamp = None; - 
let mut ext_nodeid = ext::NodeIdType::default(); - let mut ext_target = ext::TargetType::default(); + let mut ext_nodeid = ext::NodeIdType::DEFAULT; + let mut ext_target = ext::TargetType::DEFAULT; let mut ext_limit = None; let mut ext_timeout = None; diff --git a/commons/zenoh-codec/src/network/response.rs b/commons/zenoh-codec/src/network/response.rs index bec7df2967..5b69e8b109 100644 --- a/commons/zenoh-codec/src/network/response.rs +++ b/commons/zenoh-codec/src/network/response.rs @@ -48,13 +48,13 @@ where // Header let mut header = id::RESPONSE; - let mut n_exts = ((ext_qos != &ext::QoSType::default()) as u8) + let mut n_exts = ((ext_qos != &ext::QoSType::DEFAULT) as u8) + (ext_tstamp.is_some() as u8) + (ext_respid.is_some() as u8); if n_exts != 0 { header |= flag::Z; } - if wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::DEFAULT { header |= flag::M; } if wire_expr.has_suffix() { @@ -67,7 +67,7 @@ where self.write(&mut *writer, wire_expr)?; // Extensions - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -123,7 +123,7 @@ where }; // Extensions - let mut ext_qos = ext::QoSType::default(); + let mut ext_qos = ext::QoSType::DEFAULT; let mut ext_tstamp = None; let mut ext_respid = None; @@ -183,8 +183,7 @@ where // Header let mut header = id::RESPONSE_FINAL; - let mut n_exts = - ((ext_qos != &ext::QoSType::default()) as u8) + (ext_tstamp.is_some() as u8); + let mut n_exts = ((ext_qos != &ext::QoSType::DEFAULT) as u8) + (ext_tstamp.is_some() as u8); if n_exts != 0 { header |= flag::Z; } @@ -194,7 +193,7 @@ where self.write(&mut *writer, rid)?; // Extensions - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -236,7 +235,7 @@ where let rid: RequestId = bodec.read(&mut *reader)?; // Extensions - let mut ext_qos = ext::QoSType::default(); + let mut ext_qos = ext::QoSType::DEFAULT; let mut ext_tstamp = None; let mut has_ext = imsg::has_flag(self.header, flag::Z); diff --git a/commons/zenoh-codec/src/transport/fragment.rs b/commons/zenoh-codec/src/transport/fragment.rs index b66f395df1..b01e2c2bae 100644 --- a/commons/zenoh-codec/src/transport/fragment.rs +++ b/commons/zenoh-codec/src/transport/fragment.rs @@ -48,7 +48,7 @@ where if *more { header |= flag::M; } - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { header |= flag::Z; } self.write(&mut *writer, header)?; @@ -57,7 +57,7 @@ where self.write(&mut *writer, sn)?; // Extensions - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { self.write(&mut *writer, (*ext_qos, false))?; } @@ -97,7 +97,7 @@ where let sn: TransportSn = self.codec.read(&mut *reader)?; // Extensions - let mut ext_qos = ext::QoSType::default(); + let mut ext_qos = ext::QoSType::DEFAULT; let mut has_ext = imsg::has_flag(self.header, flag::Z); while has_ext { diff --git a/commons/zenoh-codec/src/transport/frame.rs b/commons/zenoh-codec/src/transport/frame.rs index 8d39aabcdb..ab82a024c4 100644 --- a/commons/zenoh-codec/src/transport/frame.rs +++ b/commons/zenoh-codec/src/transport/frame.rs @@ -46,7 +46,7 @@ where if let Reliability::Reliable = reliability { header |= flag::R; } - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { header |= flag::Z; } self.write(&mut *writer, header)?; @@ -55,7 +55,7 @@ where self.write(&mut *writer, sn)?; // 
Extensions - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { self.write(&mut *writer, (x.ext_qos, false))?; } @@ -94,7 +94,7 @@ where let sn: TransportSn = self.codec.read(&mut *reader)?; // Extensions - let mut ext_qos = ext::QoSType::default(); + let mut ext_qos = ext::QoSType::DEFAULT; let mut has_ext = imsg::has_flag(self.header, flag::Z); while has_ext { diff --git a/commons/zenoh-codec/src/transport/join.rs b/commons/zenoh-codec/src/transport/join.rs index 80c1663413..d87ceecc78 100644 --- a/commons/zenoh-codec/src/transport/join.rs +++ b/commons/zenoh-codec/src/transport/join.rs @@ -121,7 +121,7 @@ where let (_, more): (ZExtZBufHeader<{ ext::QoS::ID }>, bool) = self.read(&mut *reader)?; // Body - let mut ext_qos = Box::new([PrioritySn::default(); Priority::NUM]); + let mut ext_qos = Box::new([PrioritySn::DEFAULT; Priority::NUM]); for p in ext_qos.iter_mut() { *p = self.codec.read(&mut *reader)?; } diff --git a/commons/zenoh-codec/src/transport/oam.rs b/commons/zenoh-codec/src/transport/oam.rs index e2f905abf8..6861f638d3 100644 --- a/commons/zenoh-codec/src/transport/oam.rs +++ b/commons/zenoh-codec/src/transport/oam.rs @@ -47,7 +47,7 @@ where header |= iext::ENC_ZBUF; } } - let mut n_exts = (ext_qos != &ext::QoSType::default()) as u8; + let mut n_exts = (ext_qos != &ext::QoSType::DEFAULT) as u8; if n_exts != 0 { header |= flag::Z; } @@ -57,7 +57,7 @@ where self.write(&mut *writer, id)?; // Extensions - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -105,7 +105,7 @@ where let id: OamId = self.codec.read(&mut *reader)?; // Extensions - let mut ext_qos = ext::QoSType::default(); + let mut ext_qos = ext::QoSType::DEFAULT; let mut has_ext = imsg::has_flag(self.header, flag::Z); while has_ext { diff --git a/commons/zenoh-codec/src/zenoh/ack.rs b/commons/zenoh-codec/src/zenoh/ack.rs deleted file mode 100644 index 78cbca2987..0000000000 --- a/commons/zenoh-codec/src/zenoh/ack.rs +++ /dev/null @@ -1,129 +0,0 @@ -// -// Copyright (c) 2022 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
-// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; -use alloc::vec::Vec; -use zenoh_buffers::{ - reader::{DidntRead, Reader}, - writer::{DidntWrite, Writer}, -}; -use zenoh_protocol::{ - common::{iext, imsg}, - zenoh::{ - ack::{ext, flag, Ack}, - id, - }, -}; - -impl WCodec<&Ack, &mut W> for Zenoh080 -where - W: Writer, -{ - type Output = Result<(), DidntWrite>; - - fn write(self, writer: &mut W, x: &Ack) -> Self::Output { - let Ack { - timestamp, - ext_sinfo, - ext_unknown, - } = x; - - // Header - let mut header = id::ACK; - if timestamp.is_some() { - header |= flag::T; - } - let mut n_exts = ((ext_sinfo.is_some()) as u8) + (ext_unknown.len() as u8); - if n_exts != 0 { - header |= flag::Z; - } - self.write(&mut *writer, header)?; - - // Body - if let Some(ts) = timestamp.as_ref() { - self.write(&mut *writer, ts)?; - } - - // Extensions - if let Some(sinfo) = ext_sinfo.as_ref() { - n_exts -= 1; - self.write(&mut *writer, (sinfo, n_exts != 0))?; - } - for u in ext_unknown.iter() { - n_exts -= 1; - self.write(&mut *writer, (u, n_exts != 0))?; - } - - Ok(()) - } -} - -impl RCodec for Zenoh080 -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - let header: u8 = self.read(&mut *reader)?; - let codec = Zenoh080Header::new(header); - codec.read(reader) - } -} - -impl RCodec for Zenoh080Header -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - if imsg::mid(self.header) != id::ACK { - return Err(DidntRead); - } - - // Body - let mut timestamp: Option = None; - if imsg::has_flag(self.header, flag::T) { - timestamp = Some(self.codec.read(&mut *reader)?); - } - - // Extensions - let mut ext_sinfo: Option = None; - let mut ext_unknown = Vec::new(); - - let mut has_ext = imsg::has_flag(self.header, flag::Z); - while has_ext { - let ext: u8 = self.codec.read(&mut *reader)?; - let eodec = Zenoh080Header::new(ext); - match iext::eid(ext) { - ext::SourceInfo::ID => { - let (s, ext): (ext::SourceInfoType, bool) = eodec.read(&mut *reader)?; - ext_sinfo = Some(s); - has_ext = ext; - } - _ => { - let (u, ext) = extension::read(reader, "Ack", ext)?; - ext_unknown.push(u); - has_ext = ext; - } - } - } - - Ok(Ack { - timestamp, - ext_sinfo, - ext_unknown, - }) - } -} diff --git a/commons/zenoh-codec/src/zenoh/mod.rs b/commons/zenoh-codec/src/zenoh/mod.rs index d59add9d63..fdff09be94 100644 --- a/commons/zenoh-codec/src/zenoh/mod.rs +++ b/commons/zenoh-codec/src/zenoh/mod.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -pub mod ack; pub mod del; pub mod err; pub mod pull; @@ -121,7 +120,6 @@ where fn write(self, writer: &mut W, x: &ResponseBody) -> Self::Output { match x { ResponseBody::Reply(b) => self.write(&mut *writer, b), - ResponseBody::Ack(b) => self.write(&mut *writer, b), ResponseBody::Err(b) => self.write(&mut *writer, b), ResponseBody::Put(b) => self.write(&mut *writer, b), } @@ -140,7 +138,6 @@ where let codec = Zenoh080Header::new(header); let body = match imsg::mid(codec.header) { id::REPLY => ResponseBody::Reply(codec.read(&mut *reader)?), - id::ACK => ResponseBody::Ack(codec.read(&mut *reader)?), id::ERR => ResponseBody::Err(codec.read(&mut *reader)?), id::PUT => ResponseBody::Put(codec.read(&mut *reader)?), _ => return Err(DidntRead), diff --git a/commons/zenoh-codec/src/zenoh/put.rs b/commons/zenoh-codec/src/zenoh/put.rs index ebc364cf9b..4f50be4872 100644 
--- a/commons/zenoh-codec/src/zenoh/put.rs +++ b/commons/zenoh-codec/src/zenoh/put.rs @@ -54,7 +54,7 @@ where if timestamp.is_some() { header |= flag::T; } - if encoding != &Encoding::default() { + if encoding != &Encoding::DEFAULT { header |= flag::E; } let mut n_exts = (ext_sinfo.is_some()) as u8 @@ -73,7 +73,7 @@ where if let Some(ts) = timestamp.as_ref() { self.write(&mut *writer, ts)?; } - if encoding != &Encoding::default() { + if encoding != &Encoding::DEFAULT { self.write(&mut *writer, encoding)?; } @@ -143,7 +143,7 @@ where timestamp = Some(self.codec.read(&mut *reader)?); } - let mut encoding = Encoding::default(); + let mut encoding = Encoding::DEFAULT; if imsg::has_flag(self.header, flag::E) { encoding = self.codec.read(&mut *reader)?; } diff --git a/commons/zenoh-codec/src/zenoh/query.rs b/commons/zenoh-codec/src/zenoh/query.rs index cb0506e474..55f25cd5ea 100644 --- a/commons/zenoh-codec/src/zenoh/query.rs +++ b/commons/zenoh-codec/src/zenoh/query.rs @@ -83,7 +83,7 @@ where // Header let mut header = id::QUERY; - if consolidation != &Consolidation::default() { + if consolidation != &Consolidation::DEFAULT { header |= flag::C; } if !parameters.is_empty() { @@ -99,7 +99,7 @@ where self.write(&mut *writer, header)?; // Body - if consolidation != &Consolidation::default() { + if consolidation != &Consolidation::DEFAULT { self.write(&mut *writer, *consolidation)?; } if !parameters.is_empty() { @@ -153,7 +153,7 @@ where } // Body - let mut consolidation = Consolidation::default(); + let mut consolidation = Consolidation::DEFAULT; if imsg::has_flag(self.header, flag::C) { consolidation = self.codec.read(&mut *reader)?; } diff --git a/commons/zenoh-codec/src/zenoh/reply.rs b/commons/zenoh-codec/src/zenoh/reply.rs index d54e98cc5e..308004a1c2 100644 --- a/commons/zenoh-codec/src/zenoh/reply.rs +++ b/commons/zenoh-codec/src/zenoh/reply.rs @@ -41,7 +41,7 @@ where // Header let mut header = id::REPLY; - if consolidation != &Consolidation::default() { + if consolidation != &Consolidation::DEFAULT { header |= flag::C; } let mut n_exts = ext_unknown.len() as u8; @@ -51,7 +51,7 @@ where self.write(&mut *writer, header)?; // Body - if consolidation != &Consolidation::default() { + if consolidation != &Consolidation::DEFAULT { self.write(&mut *writer, *consolidation)?; } @@ -93,7 +93,7 @@ where } // Body - let mut consolidation = Consolidation::default(); + let mut consolidation = Consolidation::DEFAULT; if imsg::has_flag(self.header, flag::C) { consolidation = self.codec.read(&mut *reader)?; } diff --git a/commons/zenoh-codec/tests/codec.rs b/commons/zenoh-codec/tests/codec.rs index 28201c1977..7f23214b49 100644 --- a/commons/zenoh-codec/tests/codec.rs +++ b/commons/zenoh-codec/tests/codec.rs @@ -582,11 +582,6 @@ fn codec_err() { run!(zenoh::Err, zenoh::Err::rand()); } -#[test] -fn codec_ack() { - run!(zenoh::Ack, zenoh::Ack::rand()); -} - #[test] fn codec_pull() { run!(zenoh::Pull, zenoh::Pull::rand()); diff --git a/commons/zenoh-protocol/src/common/mod.rs b/commons/zenoh-protocol/src/common/mod.rs index d11d0b0c52..ef53e5a8ac 100644 --- a/commons/zenoh-protocol/src/common/mod.rs +++ b/commons/zenoh-protocol/src/common/mod.rs @@ -19,21 +19,6 @@ pub use extension::*; /*************************************/ // Inner Message IDs pub mod imsg { - pub mod id { - // Zenoh Messages - pub const DECLARE: u8 = 0x0b; - pub const DATA: u8 = 0x0c; - pub const QUERY: u8 = 0x0d; - pub const PULL: u8 = 0x0e; - pub const UNIT: u8 = 0x0f; - pub const LINK_STATE_LIST: u8 = 0x10; - - // Message decorators - pub 
const PRIORITY: u8 = 0x1c; - pub const ROUTING_CONTEXT: u8 = 0x1d; - pub const REPLY_CONTEXT: u8 = 0x1e; - } - // Header mask pub const HEADER_BITS: u8 = 5; pub const HEADER_MASK: u8 = !(0xff << HEADER_BITS); diff --git a/commons/zenoh-protocol/src/core/encoding.rs b/commons/zenoh-protocol/src/core/encoding.rs index f202b8e79c..b3abae8aae 100644 --- a/commons/zenoh-protocol/src/core/encoding.rs +++ b/commons/zenoh-protocol/src/core/encoding.rs @@ -266,6 +266,8 @@ impl Default for Encoding { } impl Encoding { + pub const DEFAULT: Self = Self::EMPTY; + #[cfg(feature = "test")] pub fn rand() -> Self { use rand::{ diff --git a/commons/zenoh-protocol/src/core/locator.rs b/commons/zenoh-protocol/src/core/locator.rs index cdd3dfa64c..42379f2b65 100644 --- a/commons/zenoh-protocol/src/core/locator.rs +++ b/commons/zenoh-protocol/src/core/locator.rs @@ -122,67 +122,3 @@ impl Locator { EndPoint::rand().into() } } - -// pub(crate) trait HasCanonForm { -// fn is_canon(&self) -> bool; - -// type Output; -// fn canonicalize(self) -> Self::Output; -// } - -// fn cmp(this: &str, than: &str) -> core::cmp::Ordering { -// let is_longer = this.len().cmp(&than.len()); -// let this = this.chars(); -// let than = than.chars(); -// let zip = this.zip(than); -// for (this, than) in zip { -// match this.cmp(&than) { -// core::cmp::Ordering::Equal => {} -// o => return o, -// } -// } -// is_longer -// } - -// impl<'a, T: Iterator + Clone, V> HasCanonForm for T { -// fn is_canon(&self) -> bool { -// let mut iter = self.clone(); -// let mut acc = if let Some((key, _)) = iter.next() { -// key -// } else { -// return true; -// }; -// for (key, _) in iter { -// if cmp(key, acc) != core::cmp::Ordering::Greater { -// return false; -// } -// acc = key; -// } -// true -// } - -// type Output = Vec<(&'a str, V)>; -// fn canonicalize(mut self) -> Self::Output { -// let mut result = Vec::new(); -// if let Some(v) = self.next() { -// result.push(v); -// } -// 'outer: for (k, v) in self { -// for (i, (x, _)) in result.iter().enumerate() { -// match cmp(k, x) { -// core::cmp::Ordering::Less => { -// result.insert(i, (k, v)); -// continue 'outer; -// } -// core::cmp::Ordering::Equal => { -// result[i].1 = v; -// continue 'outer; -// } -// core::cmp::Ordering::Greater => {} -// } -// } -// result.push((k, v)) -// } -// result -// } -// } diff --git a/commons/zenoh-protocol/src/core/mod.rs b/commons/zenoh-protocol/src/core/mod.rs index 2547034c44..3e9315bec2 100644 --- a/commons/zenoh-protocol/src/core/mod.rs +++ b/commons/zenoh-protocol/src/core/mod.rs @@ -16,7 +16,6 @@ use alloc::{ boxed::Box, format, string::{String, ToString}, - vec::Vec, }; use core::{ convert::{From, TryFrom, TryInto}, @@ -54,43 +53,6 @@ pub use endpoint::*; pub mod resolution; pub use resolution::*; -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct Property { - pub key: u64, - pub value: Vec, -} - -/// The kind of a `Sample`. -#[repr(u8)] -#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)] -pub enum SampleKind { - /// if the `Sample` was issued by a `put` operation. - #[default] - Put = 0, - /// if the `Sample` was issued by a `delete` operation. 
- Delete = 1, -} - -impl fmt::Display for SampleKind { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - SampleKind::Put => write!(f, "PUT"), - SampleKind::Delete => write!(f, "DELETE"), - } - } -} - -impl TryFrom for SampleKind { - type Error = u64; - fn try_from(kind: u64) -> Result { - match kind { - 0 => Ok(SampleKind::Put), - 1 => Ok(SampleKind::Delete), - _ => Err(kind), - } - } -} - /// The global unique id of a zenoh peer. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] #[repr(transparent)] @@ -314,6 +276,8 @@ pub enum Priority { } impl Priority { + /// Default + pub const DEFAULT: Self = Self::Data; /// The lowest Priority pub const MIN: Self = Self::Background; /// The highest Priority @@ -354,6 +318,8 @@ pub enum Reliability { } impl Reliability { + pub const DEFAULT: Self = Self::BestEffort; + #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; @@ -374,6 +340,13 @@ pub struct Channel { pub reliability: Reliability, } +impl Channel { + pub const DEFAULT: Self = Self { + priority: Priority::DEFAULT, + reliability: Reliability::DEFAULT, + }; +} + /// The kind of congestion control. #[derive(Debug, Default, Copy, Clone, PartialEq, Eq)] #[repr(u8)] @@ -383,51 +356,6 @@ pub enum CongestionControl { Block = 1, } -/// The subscription mode. -#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)] -#[repr(u8)] -pub enum SubMode { - #[default] - Push = 0, - Pull = 1, -} - -#[derive(Debug, Clone, PartialEq, Eq, Default)] -pub struct SubInfo { - pub reliability: Reliability, - pub mode: SubMode, -} - -#[derive(Debug, Default, Clone, PartialEq, Eq, Hash)] -pub struct QueryableInfo { - pub complete: u64, // Default 0: incomplete - pub distance: u64, // Default 0: no distance -} - -/// The kind of consolidation. -#[derive(Debug, Clone, PartialEq, Eq, Copy)] -pub enum ConsolidationMode { - /// No consolidation applied: multiple samples may be received for the same key-timestamp. - None, - /// Monotonic consolidation immediately forwards samples, except if one with an equal or more recent timestamp - /// has already been sent with the same key. - /// - /// This optimizes latency while potentially reducing bandwidth. - /// - /// Note that this doesn't cause re-ordering, but drops the samples for which a more recent timestamp has already - /// been observed with the same key. - Monotonic, - /// Holds back samples to only send the set of samples that had the highest timestamp for their key. - Latest, -} - -/// The `zenoh::queryable::Queryable`s that should be target of a `zenoh::Session::get()`. 
-#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] -pub enum QueryTarget { - #[default] - BestMatching, - All, - AllComplete, - #[cfg(feature = "complete_n")] - Complete(u64), +impl CongestionControl { + pub const DEFAULT: Self = Self::Drop; } diff --git a/commons/zenoh-protocol/src/core/wire_expr.rs b/commons/zenoh-protocol/src/core/wire_expr.rs index 7b0dee7471..6d9623d6ca 100644 --- a/commons/zenoh-protocol/src/core/wire_expr.rs +++ b/commons/zenoh-protocol/src/core/wire_expr.rs @@ -257,7 +257,7 @@ impl WireExpr<'_> { WireExpr { scope, suffix: suffix.into(), - mapping: Mapping::default(), + mapping: Mapping::DEFAULT, } } } diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index 76415d52f5..1568029cc6 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -156,6 +156,8 @@ pub enum Mode { } impl Mode { + pub const DEFAULT: Self = Self::Push; + #[cfg(feature = "test")] fn rand() -> Self { use rand::Rng; @@ -344,7 +346,7 @@ pub mod subscriber { /// - if P==1 then the subscription is pull, else it is push /// - rsv: Reserved /// ``` - #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] + #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct SubscriberInfo { pub reliability: Reliability, pub mode: Mode, @@ -354,6 +356,11 @@ pub mod subscriber { pub const R: u64 = 1; pub const P: u64 = 1 << 1; + pub const DEFAULT: Self = Self { + reliability: Reliability::DEFAULT, + mode: Mode::DEFAULT, + }; + #[cfg(feature = "test")] pub fn rand() -> Self { let reliability = Reliability::rand(); @@ -363,6 +370,12 @@ pub mod subscriber { } } + impl Default for SubscriberInfo { + fn default() -> Self { + Self::DEFAULT + } + } + impl From for SubscriberInfo { fn from(ext: Info) -> Self { let reliability = if imsg::has_option(ext.value, SubscriberInfo::R) { @@ -502,13 +515,18 @@ pub mod queryable { /// +---------------+ /// ~ distance ~ /// +---------------+ - #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] + #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct QueryableInfo { pub complete: u8, // Default 0: incomplete // @TODO: maybe a bitflag pub distance: u32, // Default 0: no distance } impl QueryableInfo { + pub const DEFAULT: Self = Self { + complete: 0, + distance: 0, + }; + #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; @@ -520,6 +538,12 @@ pub mod queryable { } } + impl Default for QueryableInfo { + fn default() -> Self { + Self::DEFAULT + } + } + impl From for QueryableInfo { fn from(ext: Info) -> Self { let complete = ext.value as u8; diff --git a/commons/zenoh-protocol/src/network/mod.rs b/commons/zenoh-protocol/src/network/mod.rs index 1be58db5cc..6807488873 100644 --- a/commons/zenoh-protocol/src/network/mod.rs +++ b/commons/zenoh-protocol/src/network/mod.rs @@ -51,6 +51,8 @@ pub enum Mapping { } impl Mapping { + pub const DEFAULT: Self = Self::Receiver; + #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; @@ -226,6 +228,16 @@ pub mod ext { const D_FLAG: u8 = 0b00001000; const E_FLAG: u8 = 0b00010000; + pub const DEFAULT: Self = Self::new(Priority::DEFAULT, CongestionControl::DEFAULT, false); + + pub const DECLARE: Self = Self::new(Priority::DEFAULT, CongestionControl::Block, false); + pub const PUSH: Self = Self::new(Priority::DEFAULT, CongestionControl::Drop, false); + pub const REQUEST: Self = Self::new(Priority::DEFAULT, CongestionControl::Block, false); + pub const RESPONSE: Self = Self::new(Priority::DEFAULT, 
CongestionControl::Block, false); + pub const RESPONSE_FINAL: Self = + Self::new(Priority::DEFAULT, CongestionControl::Block, false); + pub const OAM: Self = Self::new(Priority::DEFAULT, CongestionControl::Block, false); + pub const fn new( priority: Priority, congestion_control: CongestionControl, @@ -275,35 +287,11 @@ pub mod ext { let inner: u8 = rng.gen(); Self { inner } } - - pub fn declare_default() -> Self { - Self::new(Priority::default(), CongestionControl::Block, false) - } - - pub fn push_default() -> Self { - Self::new(Priority::default(), CongestionControl::Drop, false) - } - - pub fn request_default() -> Self { - Self::new(Priority::default(), CongestionControl::Block, false) - } - - pub fn response_default() -> Self { - Self::new(Priority::default(), CongestionControl::Block, false) - } - - pub fn response_final_default() -> Self { - Self::new(Priority::default(), CongestionControl::Block, false) - } - - pub fn oam_default() -> Self { - Self::new(Priority::default(), CongestionControl::Block, false) - } } impl Default for QoSType<{ ID }> { fn default() -> Self { - Self::new(Priority::default(), CongestionControl::default(), false) + Self::new(Priority::DEFAULT, CongestionControl::DEFAULT, false) } } @@ -371,6 +359,9 @@ pub mod ext { } impl NodeIdType<{ ID }> { + // node_id == 0 means the message has been generated by the node itself + pub const DEFAULT: Self = Self { node_id: 0 }; + #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; @@ -382,8 +373,7 @@ pub mod ext { impl Default for NodeIdType<{ ID }> { fn default() -> Self { - // node_id == 0 means the message has been generated by the node itself - Self { node_id: 0 } + Self::DEFAULT } } diff --git a/commons/zenoh-protocol/src/network/request.rs b/commons/zenoh-protocol/src/network/request.rs index 9e0137ea3a..aba6bb057a 100644 --- a/commons/zenoh-protocol/src/network/request.rs +++ b/commons/zenoh-protocol/src/network/request.rs @@ -66,7 +66,6 @@ pub struct Request { pub mod ext { use crate::{ common::{ZExtZ64, ZExtZBuf}, - core::QueryTarget, zextz64, zextzbuf, }; use core::{num::NonZeroU32, time::Duration}; @@ -88,9 +87,19 @@ pub mod ext { /// +---------------+ /// /// The `zenoh::queryable::Queryable`s that should be target of a `zenoh::Session::get()`. 
- pub type TargetType = QueryTarget; + #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] + pub enum TargetType { + #[default] + BestMatching, + All, + AllComplete, + #[cfg(feature = "complete_n")] + Complete(u64), + } impl TargetType { + pub const DEFAULT: Self = Self::BestMatching; + #[cfg(feature = "test")] pub fn rand() -> Self { use rand::prelude::*; diff --git a/commons/zenoh-protocol/src/transport/mod.rs b/commons/zenoh-protocol/src/transport/mod.rs index cdf994e5dd..307389f8c9 100644 --- a/commons/zenoh-protocol/src/transport/mod.rs +++ b/commons/zenoh-protocol/src/transport/mod.rs @@ -75,13 +75,18 @@ pub enum TransportBodyLowLatency { pub type TransportSn = u32; -#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct PrioritySn { pub reliable: TransportSn, pub best_effort: TransportSn, } impl PrioritySn { + pub const DEFAULT: Self = Self { + reliable: TransportSn::MIN, + best_effort: TransportSn::MIN, + }; + #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; @@ -252,7 +257,8 @@ pub mod ext { } impl QoSType<{ ID }> { - pub const P_MASK: u8 = 0b00000111; + const P_MASK: u8 = 0b00000111; + pub const DEFAULT: Self = Self::new(Priority::DEFAULT); pub const fn new(priority: Priority) -> Self { Self { @@ -276,7 +282,7 @@ pub mod ext { impl Default for QoSType<{ ID }> { fn default() -> Self { - Self::new(Priority::default()) + Self::DEFAULT } } diff --git a/commons/zenoh-protocol/src/zenoh/ack.rs b/commons/zenoh-protocol/src/zenoh/ack.rs deleted file mode 100644 index d40bf58791..0000000000 --- a/commons/zenoh-protocol/src/zenoh/ack.rs +++ /dev/null @@ -1,84 +0,0 @@ -// -// Copyright (c) 2022 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
-// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use crate::common::ZExtUnknown; -use alloc::vec::Vec; -use uhlc::Timestamp; - -/// # Ack message -/// -/// ```text -/// Flags: -/// - T: Timestamp If T==1 then the timestamp if present -/// - X: Reserved -/// - Z: Extension If Z==1 then at least one extension is present -/// -/// 7 6 5 4 3 2 1 0 -/// +-+-+-+-+-+-+-+-+ -/// |Z|X|T| ACK | -/// +-+-+-+---------+ -/// ~ ts: ~ if T==1 -/// +---------------+ -/// ~ [err_exts] ~ if Z==1 -/// +---------------+ -/// ``` -pub mod flag { - pub const T: u8 = 1 << 5; // 0x20 Timestamp if T==0 then the timestamp if present - // pub const X: u8 = 1 << 6; // 0x40 Reserved - pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow -} - -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct Ack { - pub timestamp: Option, - pub ext_sinfo: Option, - pub ext_unknown: Vec, -} - -pub mod ext { - use crate::{common::ZExtZBuf, zextzbuf}; - - /// # SourceInfo extension - /// Used to carry additional information about the source of data - pub type SourceInfo = zextzbuf!(0x1, false); - pub type SourceInfoType = crate::zenoh::ext::SourceInfoType<{ SourceInfo::ID }>; -} - -impl Ack { - #[cfg(feature = "test")] - pub fn rand() -> Self { - use crate::{common::iext, core::ZenohId}; - use rand::Rng; - let mut rng = rand::thread_rng(); - - let timestamp = rng.gen_bool(0.5).then_some({ - let time = uhlc::NTP64(rng.gen()); - let id = uhlc::ID::try_from(ZenohId::rand().to_le_bytes()).unwrap(); - Timestamp::new(time, id) - }); - let ext_sinfo = rng.gen_bool(0.5).then_some(ext::SourceInfoType::rand()); - let mut ext_unknown = Vec::new(); - for _ in 0..rng.gen_range(0..4) { - ext_unknown.push(ZExtUnknown::rand2( - iext::mid(ext::SourceInfo::ID) + 1, - false, - )); - } - - Self { - timestamp, - ext_sinfo, - ext_unknown, - } - } -} diff --git a/commons/zenoh-protocol/src/zenoh/mod.rs b/commons/zenoh-protocol/src/zenoh/mod.rs index a23eaa9b21..d73d8cdd06 100644 --- a/commons/zenoh-protocol/src/zenoh/mod.rs +++ b/commons/zenoh-protocol/src/zenoh/mod.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -pub mod ack; pub mod del; pub mod err; pub mod pull; @@ -20,7 +19,6 @@ pub mod query; pub mod reply; use crate::core::Encoding; -pub use ack::Ack; pub use del::Del; pub use err::Err; pub use pull::Pull; @@ -35,8 +33,7 @@ pub mod id { pub const QUERY: u8 = 0x03; pub const REPLY: u8 = 0x04; pub const ERR: u8 = 0x05; - pub const ACK: u8 = 0x06; - pub const PULL: u8 = 0x07; + pub const PULL: u8 = 0x06; } // DataInfo @@ -127,7 +124,6 @@ impl From for RequestBody { #[derive(Debug, Clone, PartialEq, Eq)] pub enum ResponseBody { Reply(Reply), - Ack(Ack), Err(Err), Put(Put), } @@ -138,11 +134,10 @@ impl ResponseBody { use rand::Rng; let mut rng = rand::thread_rng(); - match rng.gen_range(0..4) { + match rng.gen_range(0..3) { 0 => ResponseBody::Reply(Reply::rand()), - 1 => ResponseBody::Ack(Ack::rand()), - 2 => ResponseBody::Err(Err::rand()), - 3 => ResponseBody::Put(Put::rand()), + 1 => ResponseBody::Err(Err::rand()), + 2 => ResponseBody::Put(Put::rand()), _ => unreachable!(), } } @@ -160,12 +155,6 @@ impl From for ResponseBody { } } -impl From for ResponseBody { - fn from(r: Ack) -> ResponseBody { - ResponseBody::Ack(r) - } -} - pub mod ext { use zenoh_buffers::ZBuf; diff --git a/commons/zenoh-protocol/src/zenoh/query.rs b/commons/zenoh-protocol/src/zenoh/query.rs index 17dfa23df8..ac53b963f5 100644 --- a/commons/zenoh-protocol/src/zenoh/query.rs +++ 
b/commons/zenoh-protocol/src/zenoh/query.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::ZExtUnknown, core::ConsolidationMode}; +use crate::common::ZExtUnknown; use alloc::{string::String, vec::Vec}; /// The kind of consolidation. @@ -38,6 +38,8 @@ pub enum Consolidation { } impl Consolidation { + pub const DEFAULT: Self = Self::Auto; + #[cfg(feature = "test")] pub fn rand() -> Self { use rand::prelude::SliceRandom; @@ -55,16 +57,6 @@ impl Consolidation { } } -impl From for Consolidation { - fn from(val: ConsolidationMode) -> Self { - match val { - ConsolidationMode::None => Consolidation::None, - ConsolidationMode::Monotonic => Consolidation::Monotonic, - ConsolidationMode::Latest => Consolidation::Latest, - } - } -} - /// # Query message /// /// ```text diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 3e130e0608..b698cbc80b 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -23,7 +23,7 @@ fn main() { env_logger::init(); let args = Args::parse(); - let mut prio = Priority::default(); + let mut prio = Priority::DEFAULT; if let Some(p) = args.priority { prio = p.try_into().unwrap(); } diff --git a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index 4139a65a05..a6aad76f7b 100644 --- a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -574,12 +574,12 @@ mod tests { let tmsg: TransportMessage = KeepAlive.into(); let nmsg: NetworkMessage = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::new(Priority::default(), CongestionControl::Block, false), + ext_qos: ext::QoSType::new(Priority::DEFAULT, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -601,7 +601,7 @@ mod tests { let mut frame = FrameHeader { reliability: Reliability::Reliable, sn: 0, - ext_qos: frame::ext::QoSType::default(), + ext_qos: frame::ext::QoSType::DEFAULT, }; // Serialize with a frame diff --git a/io/zenoh-transport/src/common/pipeline.rs b/io/zenoh-transport/src/common/pipeline.rs index 954c656280..eebf23abc9 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ -513,7 +513,7 @@ impl TransmissionPipeline { let mut stage_in = vec![]; let mut stage_out = vec![]; - let default_queue_size = [config.queue_size[Priority::default() as usize]]; + let default_queue_size = [config.queue_size[Priority::DEFAULT as usize]]; let size_iter = if priority.len() == 1 { default_queue_size.iter() } else { @@ -602,7 +602,7 @@ impl TransmissionPipelineProducer { let priority = msg.priority(); (priority as usize, priority) } else { - (0, Priority::default()) + (0, Priority::DEFAULT) }; // Lock the channel. We are the only one that will be writing on it. 
let mut queue = zlock!(self.stage_in[idx]); @@ -751,10 +751,10 @@ mod tests { wire_expr: key, ext_qos: ext::QoSType::new(Priority::Control, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -881,10 +881,10 @@ mod tests { wire_expr: key, ext_qos: ext::QoSType::new(Priority::Control, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -993,10 +993,10 @@ mod tests { false, ), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/io/zenoh-transport/src/multicast/link.rs b/io/zenoh-transport/src/multicast/link.rs index 21ed0b3fdf..b24c077c57 100644 --- a/io/zenoh-transport/src/multicast/link.rs +++ b/io/zenoh-transport/src/multicast/link.rs @@ -483,7 +483,7 @@ async fn tx_task( .collect::>(); let (next_sn, ext_qos) = if next_sns.len() == Priority::NUM { let tmp: [PrioritySn; Priority::NUM] = next_sns.try_into().unwrap(); - (PrioritySn::default(), Some(Box::new(tmp))) + (PrioritySn::DEFAULT, Some(Box::new(tmp))) } else { (next_sns[0], None) }; diff --git a/io/zenoh-transport/src/multicast/rx.rs b/io/zenoh-transport/src/multicast/rx.rs index 14f2fd619c..dedef2149c 100644 --- a/io/zenoh-transport/src/multicast/rx.rs +++ b/io/zenoh-transport/src/multicast/rx.rs @@ -145,7 +145,7 @@ impl TransportMulticastInner { let priority = ext_qos.priority(); let c = if self.is_qos() { &peer.priority_rx[priority as usize] - } else if priority == Priority::default() { + } else if priority == Priority::DEFAULT { &peer.priority_rx[0] } else { bail!( @@ -181,7 +181,7 @@ impl TransportMulticastInner { let priority = ext_qos.priority(); let c = if self.is_qos() { &peer.priority_rx[priority as usize] - } else if priority == Priority::default() { + } else if priority == Priority::DEFAULT { &peer.priority_rx[0] } else { bail!( diff --git a/io/zenoh-transport/src/shm.rs b/io/zenoh-transport/src/shm.rs index 8b0e93f494..6f98cafc14 100644 --- a/io/zenoh-transport/src/shm.rs +++ b/io/zenoh-transport/src/shm.rs @@ -167,7 +167,6 @@ pub fn map_zmsg_to_shminfo(msg: &mut NetworkMessage) -> ZResult { ResponseBody::Reply(b) => b.map_to_shminfo(), ResponseBody::Put(b) => b.map_to_shminfo(), ResponseBody::Err(b) => b.map_to_shminfo(), - ResponseBody::Ack(_) => Ok(false), }, NetworkBody::ResponseFinal(_) | NetworkBody::Declare(_) | NetworkBody::OAM(_) => Ok(false), } @@ -222,7 +221,6 @@ pub fn map_zmsg_to_shmbuf( ResponseBody::Put(b) => b.map_to_shmbuf(shmr), ResponseBody::Err(b) => b.map_to_shmbuf(shmr), ResponseBody::Reply(b) => b.map_to_shmbuf(shmr), - ResponseBody::Ack(_) => Ok(false), }, NetworkBody::ResponseFinal(_) | NetworkBody::Declare(_) | NetworkBody::OAM(_) => Ok(false), } diff --git a/io/zenoh-transport/src/unicast/establishment/cookie.rs b/io/zenoh-transport/src/unicast/establishment/cookie.rs index e9916be7e6..0db9e1c93a 100644 --- 
a/io/zenoh-transport/src/unicast/establishment/cookie.rs +++ b/io/zenoh-transport/src/unicast/establishment/cookie.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -// use super::properties::EstablishmentProperties; use crate::unicast::establishment::ext; use std::convert::TryFrom; use zenoh_buffers::{ diff --git a/io/zenoh-transport/src/unicast/establishment/properties.rs b/io/zenoh-transport/src/unicast/establishment/properties.rs deleted file mode 100644 index e259b650ab..0000000000 --- a/io/zenoh-transport/src/unicast/establishment/properties.rs +++ /dev/null @@ -1,132 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use std::{ - convert::TryFrom, - ops::{Deref, DerefMut}, -}; -use zenoh_buffers::{reader::HasReader, writer::HasWriter, ZBuf}; -use zenoh_codec::{RCodec, WCodec, Zenoh080}; -use zenoh_protocol::core::Property; -use zenoh_result::{bail, zerror, Error as ZError, ZResult}; - -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct EstablishmentProperties(Vec); - -impl Deref for EstablishmentProperties { - type Target = Vec; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl DerefMut for EstablishmentProperties { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -impl EstablishmentProperties { - pub(super) fn new() -> Self { - EstablishmentProperties(vec![]) - } - - pub(super) fn insert(&mut self, p: Property) -> ZResult<()> { - if self.0.iter().any(|x| x.key == p.key) { - bail!("Property {} already exists", p.key) - } - self.0.push(p); - Ok(()) - } - - pub(super) fn remove(&mut self, key: u64) -> Option { - self.0 - .iter() - .position(|x| x.key == key) - .map(|i| self.0.remove(i)) - } -} - -impl TryFrom<&EstablishmentProperties> for Attachment { - type Error = ZError; - - fn try_from(eps: &EstablishmentProperties) -> Result { - if eps.is_empty() { - bail!("Can not create an attachment with zero properties") - } - - let mut zbuf = ZBuf::empty(); - let mut writer = zbuf.writer(); - let codec = Zenoh080::new(); - - codec - .write(&mut writer, eps.0.as_slice()) - .map_err(|_| zerror!(""))?; - - let attachment = Attachment::new(zbuf); - Ok(attachment) - } -} - -impl TryFrom> for EstablishmentProperties { - type Error = ZError; - - fn try_from(mut ps: Vec) -> Result { - let mut eps = EstablishmentProperties::new(); - for p in ps.drain(..) 
{ - eps.insert(p)?; - } - - Ok(eps) - } -} - -impl TryFrom<&Attachment> for EstablishmentProperties { - type Error = ZError; - - fn try_from(att: &Attachment) -> Result { - let mut reader = att.buffer.reader(); - let codec = Zenoh080::new(); - - let ps: Vec = codec.read(&mut reader).map_err(|_| zerror!(""))?; - EstablishmentProperties::try_from(ps) - } -} - -impl EstablishmentProperties { - #[cfg(test)] - pub fn rand() -> Self { - use rand::Rng; - - const MIN: usize = 1; - const MAX: usize = 8; - - let mut rng = rand::thread_rng(); - - let mut eps = EstablishmentProperties::new(); - for _ in MIN..=MAX { - loop { - let key: u64 = rng.gen(); - let mut value = vec![0u8; rng.gen_range(MIN..=MAX)]; - rng.fill(&mut value[..]); - let p = Property { key, value }; - if eps.insert(p).is_ok() { - break; - } - } - } - - eps - } -} diff --git a/io/zenoh-transport/src/unicast/universal/rx.rs b/io/zenoh-transport/src/unicast/universal/rx.rs index 935a1814b0..04af432aef 100644 --- a/io/zenoh-transport/src/unicast/universal/rx.rs +++ b/io/zenoh-transport/src/unicast/universal/rx.rs @@ -81,7 +81,7 @@ impl TransportUnicastUniversal { let priority = ext_qos.priority(); let c = if self.is_qos() { &self.priority_rx[priority as usize] - } else if priority == Priority::default() { + } else if priority == Priority::DEFAULT { &self.priority_rx[0] } else { bail!( @@ -124,7 +124,7 @@ impl TransportUnicastUniversal { let c = if self.is_qos() { &self.priority_rx[qos.priority() as usize] - } else if qos.priority() == Priority::default() { + } else if qos.priority() == Priority::DEFAULT { &self.priority_rx[0] } else { bail!( diff --git a/io/zenoh-transport/tests/multicast_compression.rs b/io/zenoh-transport/tests/multicast_compression.rs index f8e56a5484..4d1196e10f 100644 --- a/io/zenoh-transport/tests/multicast_compression.rs +++ b/io/zenoh-transport/tests/multicast_compression.rs @@ -269,11 +269,11 @@ mod tests { wire_expr: "test".into(), ext_qos: QoSType::new(channel.priority, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: NodeIdType::default(), + ext_nodeid: NodeIdType::DEFAULT, payload: Put { payload: vec![0u8; msg_size].into(), timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -363,7 +363,7 @@ mod tests { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { diff --git a/io/zenoh-transport/tests/multicast_transport.rs b/io/zenoh-transport/tests/multicast_transport.rs index ebb290af1e..fe5a44b7ee 100644 --- a/io/zenoh-transport/tests/multicast_transport.rs +++ b/io/zenoh-transport/tests/multicast_transport.rs @@ -265,11 +265,11 @@ mod tests { wire_expr: "test".into(), ext_qos: QoSType::new(channel.priority, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: NodeIdType::default(), + ext_nodeid: NodeIdType::DEFAULT, payload: Put { payload: vec![0u8; msg_size].into(), timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -359,7 +359,7 @@ mod tests { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { diff --git a/io/zenoh-transport/tests/unicast_compression.rs b/io/zenoh-transport/tests/unicast_compression.rs index 
323c6f529e..dd4f55b5f5 100644 --- a/io/zenoh-transport/tests/unicast_compression.rs +++ b/io/zenoh-transport/tests/unicast_compression.rs @@ -297,11 +297,11 @@ mod tests { wire_expr: "test".into(), ext_qos: QoSType::new(channel.priority, cctrl, false), ext_tstamp: None, - ext_nodeid: NodeIdType::default(), + ext_nodeid: NodeIdType::DEFAULT, payload: Put { payload: vec![0u8; msg_size].into(), timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -442,7 +442,7 @@ mod tests { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { @@ -472,7 +472,7 @@ mod tests { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { @@ -505,7 +505,7 @@ mod tests { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -535,7 +535,7 @@ mod tests { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { diff --git a/io/zenoh-transport/tests/unicast_concurrent.rs b/io/zenoh-transport/tests/unicast_concurrent.rs index d13f763b68..4e90432193 100644 --- a/io/zenoh-transport/tests/unicast_concurrent.rs +++ b/io/zenoh-transport/tests/unicast_concurrent.rs @@ -194,13 +194,13 @@ async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec, endpoint02: Vec, client_transport: TransportUn wire_expr: "test".into(), ext_qos: QoSType::new(*p, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: NodeIdType::default(), + ext_nodeid: NodeIdType::DEFAULT, payload: Put { payload: vec![0u8; *ms].into(), timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/io/zenoh-transport/tests/unicast_shm.rs b/io/zenoh-transport/tests/unicast_shm.rs index f9180849af..d12a9db7dc 100644 --- a/io/zenoh-transport/tests/unicast_shm.rs +++ b/io/zenoh-transport/tests/unicast_shm.rs @@ -271,13 +271,13 @@ mod tests { let message: NetworkMessage = Push { wire_expr: "test".into(), - ext_qos: QoSType::new(Priority::default(), CongestionControl::Block, false), + ext_qos: QoSType::new(Priority::DEFAULT, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: NodeIdType::default(), + ext_nodeid: NodeIdType::DEFAULT, payload: Put { payload: sbuf.into(), timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, ext_shm: None, ext_attachment: None, @@ -319,13 +319,13 @@ mod tests { let message: NetworkMessage = Push { wire_expr: "test".into(), - ext_qos: QoSType::new(Priority::default(), CongestionControl::Block, false), + ext_qos: QoSType::new(Priority::DEFAULT, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: NodeIdType::default(), + ext_nodeid: NodeIdType::DEFAULT, payload: Put { payload: sbuf.into(), timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, ext_shm: None, ext_attachment: None, diff --git a/io/zenoh-transport/tests/unicast_simultaneous.rs b/io/zenoh-transport/tests/unicast_simultaneous.rs index 
19380eb49e..db73e99480 100644 --- a/io/zenoh-transport/tests/unicast_simultaneous.rs +++ b/io/zenoh-transport/tests/unicast_simultaneous.rs @@ -78,11 +78,11 @@ mod tests { wire_expr: "test".into(), ext_qos: QoSType::new(Priority::Control, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: NodeIdType::default(), + ext_nodeid: NodeIdType::DEFAULT, payload: Put { payload: vec![0u8; MSG_SIZE].into(), timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index 11839aef2a..795ea90b41 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -468,11 +468,11 @@ async fn test_transport( wire_expr: "test".into(), ext_qos: QoSType::new(channel.priority, cctrl, false), ext_tstamp: None, - ext_nodeid: NodeIdType::default(), + ext_nodeid: NodeIdType::DEFAULT, payload: Put { payload: vec![0u8; msg_size].into(), timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -614,7 +614,7 @@ fn transport_unicast_tcp_only() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { @@ -644,7 +644,7 @@ fn transport_unicast_tcp_only_with_lowlatency_transport() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { @@ -677,7 +677,7 @@ fn transport_unicast_udp_only() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -707,7 +707,7 @@ fn transport_unicast_udp_only_with_lowlatency_transport() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -739,7 +739,7 @@ fn transport_unicast_unix_only() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -773,7 +773,7 @@ fn transport_unicast_unix_only_with_lowlatency_transport() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -808,11 +808,11 @@ fn transport_unicast_ws_only() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -846,11 +846,11 @@ fn transport_unicast_ws_only_with_lowlatency_transport() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -887,7 +887,7 @@ fn 
transport_unicast_unixpipe_only() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { @@ -921,7 +921,7 @@ fn transport_unicast_unixpipe_only_with_lowlatency_transport() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { @@ -956,7 +956,7 @@ fn transport_unicast_tcp_udp() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -996,7 +996,7 @@ fn transport_unicast_tcp_unix() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -1038,7 +1038,7 @@ fn transport_unicast_udp_unix() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -1083,7 +1083,7 @@ fn transport_unicast_tcp_udp_unix() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -1130,11 +1130,11 @@ fn transport_unicast_tls_only_server() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -1184,11 +1184,11 @@ fn transport_unicast_quic_only_server() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -1256,11 +1256,11 @@ fn transport_unicast_tls_only_mutual_success() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -1323,11 +1323,11 @@ fn transport_unicast_tls_only_mutual_no_client_certs_failure() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -1403,11 +1403,11 @@ fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { diff --git a/zenoh-ext/src/subscriber_ext.rs b/zenoh-ext/src/subscriber_ext.rs index a2987f8833..83de47779c 100644 --- a/zenoh-ext/src/subscriber_ext.rs +++ 
b/zenoh-ext/src/subscriber_ext.rs @@ -290,7 +290,7 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> session: self.session, key_expr: self.key_expr, key_space: crate::LivelinessSpace, - reliability: Reliability::default(), + reliability: Reliability::DEFAULT, origin: Locality::default(), fetch, handler: self.handler, @@ -334,11 +334,11 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> session: self.session, key_expr: self.key_expr, key_space: crate::LivelinessSpace, - reliability: Reliability::default(), + reliability: Reliability::DEFAULT, origin: Locality::default(), query_selector: None, - query_target: QueryTarget::default(), - query_consolidation: QueryConsolidation::default(), + query_target: QueryTarget::DEFAULT, + query_consolidation: QueryConsolidation::DEFAULT, query_accept_replies: ReplyKeyExpr::MatchingQuery, query_timeout: Duration::from_secs(10), handler: self.handler, diff --git a/zenoh/src/key_expr.rs b/zenoh/src/key_expr.rs index d2295f9798..36c696000a 100644 --- a/zenoh/src/key_expr.rs +++ b/zenoh/src/key_expr.rs @@ -633,9 +633,9 @@ impl SyncResolve for KeyExprUndeclaration<'_> { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(zenoh_protocol::network::Declare { - ext_qos: declare::ext::QoSType::declare_default(), + ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), + ext_nodeid: declare::ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareKeyExpr(UndeclareKeyExpr { id: expr_id }), }); diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index 0883041bb7..26a803fa43 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -549,7 +549,7 @@ where &Some(KeyExpr::from(*KE_PREFIX_LIVELINESS)), Locality::default(), callback, - &SubscriberInfo::default(), + &SubscriberInfo::DEFAULT, ) .map(|sub_state| Subscriber { subscriber: SubscriberInner { @@ -747,8 +747,8 @@ where .query( &self.key_expr?.into(), &Some(KeyExpr::from(*KE_PREFIX_LIVELINESS)), - QueryTarget::default(), - QueryConsolidation::default(), + QueryTarget::DEFAULT, + QueryConsolidation::DEFAULT, Locality::default(), self.timeout, None, diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index ffe2d3ccca..d6497a80b3 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -593,9 +593,9 @@ pub fn pull_data(tables_ref: &RwLock, face: &Arc, expr: WireE for (key_expr, payload) in route { face.primitives.send_push(Push { wire_expr: key_expr, - ext_qos: ext::QoSType::push_default(), + ext_qos: ext::QoSType::PUSH, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload, }); } diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index a6748650ab..e8e84395f8 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -494,7 +494,6 @@ macro_rules! 
inc_res_stats { e.ext_body.as_ref().map(|b| b.payload.len()).unwrap_or(0), ); } - ResponseBody::Ack(_) => (), } } } @@ -555,14 +554,14 @@ pub fn route_query( for (wexpr, payload) in local_replies { let payload = ResponseBody::Reply(Reply { - consolidation: Consolidation::default(), // @TODO: handle Del case - ext_unknown: vec![], // @TODO: handle unknown extensions + consolidation: Consolidation::DEFAULT, // @TODO: handle Del case + ext_unknown: vec![], // @TODO: handle unknown extensions payload: ReplyBody::Put(Put { // @TODO: handle Del case - timestamp: None, // @TODO: handle timestamp - encoding: Encoding::default(), // @TODO: handle encoding - ext_sinfo: None, // @TODO: handle source info - ext_attachment: None, // @TODO: expose it in the API + timestamp: None, // @TODO: handle timestamp + encoding: Encoding::DEFAULT, // @TODO: handle encoding + ext_sinfo: None, // @TODO: handle source info + ext_attachment: None, // @TODO: expose it in the API #[cfg(feature = "shared-memory")] ext_shm: None, ext_unknown: vec![], // @TODO: handle unknown extensions @@ -583,7 +582,7 @@ pub fn route_query( rid: qid, wire_expr: wexpr, payload, - ext_qos: response::ext::QoSType::declare_default(), + ext_qos: response::ext::QoSType::DECLARE, ext_tstamp: None, ext_respid: Some(response::ext::ResponderIdType { zid, @@ -605,7 +604,7 @@ pub fn route_query( .send_response_final(RoutingContext::with_expr( ResponseFinal { rid: qid, - ext_qos: response::ext::QoSType::response_final_default(), + ext_qos: response::ext::QoSType::RESPONSE_FINAL, ext_tstamp: None, }, expr.full_expr().to_string(), @@ -636,7 +635,7 @@ pub fn route_query( Request { id: *qid, wire_expr: key_expr.into(), - ext_qos: ext::QoSType::request_default(), + ext_qos: ext::QoSType::REQUEST, ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: *context }, ext_target: *t, @@ -672,7 +671,7 @@ pub fn route_query( Request { id: *qid, wire_expr: key_expr.into(), - ext_qos: ext::QoSType::request_default(), + ext_qos: ext::QoSType::REQUEST, ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: *context }, ext_target: target, @@ -693,7 +692,7 @@ pub fn route_query( .send_response_final(RoutingContext::with_expr( ResponseFinal { rid: qid, - ext_qos: response::ext::QoSType::response_final_default(), + ext_qos: response::ext::QoSType::RESPONSE_FINAL, ext_tstamp: None, }, expr.full_expr().to_string(), @@ -711,7 +710,7 @@ pub fn route_query( .send_response_final(RoutingContext::with_expr( ResponseFinal { rid: qid, - ext_qos: response::ext::QoSType::response_final_default(), + ext_qos: response::ext::QoSType::RESPONSE_FINAL, ext_tstamp: None, }, "".to_string(), @@ -758,7 +757,7 @@ pub(crate) fn route_send_response( rid: query.src_qid, wire_expr: key_expr.to_owned(), payload: body, - ext_qos: response::ext::QoSType::response_default(), + ext_qos: response::ext::QoSType::RESPONSE, ext_tstamp: None, ext_respid, }, @@ -818,7 +817,7 @@ pub(crate) fn finalize_pending_query(query: Arc) { .send_response_final(RoutingContext::with_expr( ResponseFinal { rid: query.src_qid, - ext_qos: response::ext::QoSType::response_final_default(), + ext_qos: response::ext::QoSType::RESPONSE_FINAL, ext_tstamp: None, }, "".to_string(), diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 7fc71c623d..fb4dec4ad5 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -466,9 +466,9 @@ impl Resource { .insert(expr_id, nonwild_prefix.clone()); 
face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: expr_id, wire_expr: nonwild_prefix.expr().into(), diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 7becff4b4d..6f71ef443a 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -49,9 +49,9 @@ fn propagate_simple_subscription_to( let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) wire_expr: key_expr, @@ -137,9 +137,9 @@ fn declare_client_subscription( .primitives .send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) wire_expr: res.expr().into(), @@ -171,9 +171,9 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) ext_wire_expr: WireExprType { wire_expr }, @@ -209,9 +209,9 @@ pub(super) fn undeclare_client_subscription( let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) ext_wire_expr: WireExprType { wire_expr }, diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 35a10557dc..667ff63c0e 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -96,9 +96,9 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: 0, // @TODO use proper QueryableId (#703) wire_expr: key_expr, @@ -166,9 +166,9 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: 
ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { id: 0, // TODO ext_wire_expr: WireExprType { wire_expr }, @@ -431,9 +431,9 @@ pub(super) fn undeclare_client_subscription( let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { id: 0, // TODO ext_wire_expr: WireExprType { wire_expr }, @@ -467,9 +467,9 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { id: 0, // TODO wire_expr: key_expr, diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index 6281993c93..03a1e11e67 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -137,7 +137,7 @@ fn send_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: routing_context, @@ -177,9 +177,9 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: 0, // @TODO use proper QueryableId (#703) wire_expr: key_expr, @@ -347,7 +347,7 @@ fn send_forget_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: routing_context, @@ -373,9 +373,9 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: 0, // @TODO use proper QueryableId (#703) wire_expr: key_expr, diff --git a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs index ae3fda51a7..cf4d201867 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs @@ -214,7 +214,7 @@ impl Network { Ok(NetworkBody::OAM(Oam { id: OAM_LINKSTATE, body: ZExtBody::ZBuf(buf), - ext_qos: oam::ext::QoSType::oam_default(), + ext_qos: oam::ext::QoSType::OAM, ext_tstamp: None, }) .into()) diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs 
b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index 8f91335f0a..97677893aa 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -49,9 +49,9 @@ fn propagate_simple_subscription_to( let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) wire_expr: key_expr, @@ -137,9 +137,9 @@ fn declare_client_subscription( .primitives .send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) wire_expr: res.expr().into(), @@ -171,9 +171,9 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) ext_wire_expr: WireExprType { wire_expr }, @@ -209,9 +209,9 @@ pub(super) fn undeclare_client_subscription( let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) ext_wire_expr: WireExprType { wire_expr }, diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index 35a10557dc..667ff63c0e 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -96,9 +96,9 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: 0, // @TODO use proper QueryableId (#703) wire_expr: key_expr, @@ -166,9 +166,9 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) ext_wire_expr: WireExprType { wire_expr }, @@ -422,9 +422,9 @@ fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc< let wire_expr = 
Resource::get_best_key(res, "", face.id); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) ext_wire_expr: WireExprType { wire_expr }, @@ -587,9 +587,9 @@ pub(super) fn undeclare_client_subscription( let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) ext_wire_expr: WireExprType { wire_expr }, @@ -623,9 +623,9 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) wire_expr: key_expr, @@ -650,9 +650,9 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) wire_expr: key_expr, @@ -790,9 +790,9 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: let wire_expr = Resource::get_best_key(res, "", dst_face.id); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber( UndeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) @@ -815,9 +815,9 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: }; dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) wire_expr: key_expr, diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 90944a524f..dfffe42e0d 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -208,7 +208,7 @@ fn send_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: routing_context, @@ -258,9 +258,9 @@ fn propagate_simple_queryable( let key_expr = 
Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: 0, // @TODO use proper QueryableId (#703) wire_expr: key_expr, @@ -488,7 +488,7 @@ fn send_forget_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: routing_context, @@ -514,9 +514,9 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: 0, // @TODO use proper QueryableId (#703) wire_expr: key_expr, @@ -785,9 +785,9 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: 0, // @TODO use proper QueryableId (#703) wire_expr: key_expr, @@ -884,9 +884,9 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links let wire_expr = Resource::get_best_key(res, "", dst_face.id); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareQueryable( UndeclareQueryable { id: 0, // @TODO use proper QueryableId (#703) @@ -908,9 +908,9 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: 0, // @TODO use proper QueryableId (#703) wire_expr: key_expr, diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index f6fb13e76e..227dd035f4 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -270,9 +270,9 @@ impl AdminSpace { zlock!(admin.primitives).replace(primitives.clone()); primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: 0, // @TODO use proper QueryableId (#703) wire_expr: [&root_key, "/**"].concat().into(), @@ -284,13 +284,13 @@ impl AdminSpace { }); primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: 
ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) wire_expr: [&root_key, "/config/**"].concat().into(), - ext_info: SubscriberInfo::default(), + ext_info: SubscriberInfo::DEFAULT, }), }); } @@ -392,7 +392,7 @@ impl Primitives for AdminSpace { ); primitives.send_response_final(ResponseFinal { rid: msg.id, - ext_qos: ext::QoSType::response_final_default(), + ext_qos: ext::QoSType::RESPONSE_FINAL, ext_tstamp: None, }); return; @@ -405,7 +405,7 @@ impl Primitives for AdminSpace { log::error!("Unknown KeyExpr: {}", e); primitives.send_response_final(ResponseFinal { rid: msg.id, - ext_qos: ext::QoSType::response_final_default(), + ext_qos: ext::QoSType::RESPONSE_FINAL, ext_tstamp: None, }); return; diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index 363803f682..57f6a6dcbc 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -497,9 +497,9 @@ fn client_test() { Primitives::send_declare( primitives0.as_ref(), Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: 11, wire_expr: "test/client".into(), @@ -523,9 +523,9 @@ fn client_test() { Primitives::send_declare( primitives0.as_ref(), Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: 12, wire_expr: WireExpr::from(11).with_suffix("/z1_pub1"), @@ -544,9 +544,9 @@ fn client_test() { Primitives::send_declare( primitives1.as_ref(), Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: 21, wire_expr: "test/client".into(), @@ -570,9 +570,9 @@ fn client_test() { Primitives::send_declare( primitives1.as_ref(), Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: 22, wire_expr: WireExpr::from(21).with_suffix("/z2_pub1"), @@ -591,9 +591,9 @@ fn client_test() { Primitives::send_declare( primitives2.as_ref(), Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: 31, wire_expr: "test/client".into(), @@ -617,10 +617,10 @@ fn client_test() { &tables, &face0.upgrade().unwrap(), &"test/client/z1_wr1".into(), - ext::QoSType::default(), + ext::QoSType::DEFAULT, PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -650,10 +650,10 @@ fn client_test() { &router.tables, &face0.upgrade().unwrap(), &WireExpr::from(11).with_suffix("/z1_wr2"), - ext::QoSType::default(), + ext::QoSType::DEFAULT, PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, 
ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -683,10 +683,10 @@ fn client_test() { &router.tables, &face1.upgrade().unwrap(), &"test/client/**".into(), - ext::QoSType::default(), + ext::QoSType::DEFAULT, PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -716,10 +716,10 @@ fn client_test() { &router.tables, &face0.upgrade().unwrap(), &12.into(), - ext::QoSType::default(), + ext::QoSType::DEFAULT, PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -749,10 +749,10 @@ fn client_test() { &router.tables, &face1.upgrade().unwrap(), &22.into(), - ext::QoSType::default(), + ext::QoSType::DEFAULT, PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::DEFAULT, ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 36a841d1ef..ad28470f63 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -50,9 +50,7 @@ pub(crate) mod common { pub use crate::sample::Locality; #[cfg(not(feature = "unstable"))] pub(crate) use crate::sample::Locality; - pub use crate::sample::Sample; - - pub use zenoh_protocol::core::SampleKind; + pub use crate::sample::{Sample, SampleKind}; pub use crate::publication::Priority; #[zenoh_macros::unstable] diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 843190ad45..58c7c5c367 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -811,7 +811,7 @@ fn resolve_put( false, ), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: match kind { SampleKind::Put => { #[allow(unused_mut)] @@ -887,6 +887,8 @@ pub enum Priority { } impl Priority { + /// Default + pub const DEFAULT: Self = Self::Data; /// The lowest Priority pub const MIN: Self = Self::Background; /// The highest Priority @@ -1328,7 +1330,6 @@ mod tests { #[test] fn sample_kind_integrity_in_publication() { use crate::{open, prelude::sync::*}; - use zenoh_protocol::core::SampleKind; const KEY_EXPR: &str = "test/sample_kind_integrity/publication"; const VALUE: &str = "zenoh"; @@ -1351,7 +1352,6 @@ mod tests { #[test] fn sample_kind_integrity_in_put_builder() { use crate::{open, prelude::sync::*}; - use zenoh_protocol::core::SampleKind; const KEY_EXPR: &str = "test/sample_kind_integrity/put_builder"; const VALUE: &str = "zenoh"; diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index c4f3fb35e9..7a7a867cd8 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -13,7 +13,6 @@ // //! Query primitives. - use crate::handlers::{locked, Callback, DefaultHandler}; use crate::prelude::*; #[zenoh_macros::unstable] @@ -23,13 +22,38 @@ use std::collections::HashMap; use std::future::Ready; use std::time::Duration; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; +use zenoh_protocol::zenoh::query::Consolidation; use zenoh_result::ZResult; /// The [`Queryable`](crate::queryable::Queryable)s that should be target of a [`get`](Session::get). -pub use zenoh_protocol::core::QueryTarget; +pub type QueryTarget = zenoh_protocol::network::request::ext::TargetType; /// The kind of consolidation. 
-pub use zenoh_protocol::core::ConsolidationMode; +#[derive(Debug, Clone, PartialEq, Eq, Copy)] +pub enum ConsolidationMode { + /// No consolidation applied: multiple samples may be received for the same key-timestamp. + None, + /// Monotonic consolidation immediately forwards samples, except if one with an equal or more recent timestamp + /// has already been sent with the same key. + /// + /// This optimizes latency while potentially reducing bandwidth. + /// + /// Note that this doesn't cause re-ordering, but drops the samples for which a more recent timestamp has already + /// been observed with the same key. + Monotonic, + /// Holds back samples to only send the set of samples that had the highest timestamp for their key. + Latest, +} + +impl From for Consolidation { + fn from(val: ConsolidationMode) -> Self { + match val { + ConsolidationMode::None => Consolidation::None, + ConsolidationMode::Monotonic => Consolidation::Monotonic, + ConsolidationMode::Latest => Consolidation::Latest, + } + } +} /// The operation: either manual or automatic. #[derive(Clone, Copy, Debug, PartialEq, Eq)] @@ -45,6 +69,7 @@ pub struct QueryConsolidation { } impl QueryConsolidation { + pub const DEFAULT: Self = Self::AUTO; /// Automatic query consolidation strategy selection. pub const AUTO: Self = Self { mode: Mode::Auto }; @@ -72,7 +97,7 @@ impl From for QueryConsolidation { impl Default for QueryConsolidation { fn default() -> Self { - QueryConsolidation::AUTO + Self::DEFAULT } } diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 4e9f4914dd..d0ce99b512 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -56,7 +56,7 @@ impl Drop for QueryInner { fn drop(&mut self) { self.primitives.send_response_final(ResponseFinal { rid: self.qid, - ext_qos: response::ext::QoSType::response_final_default(), + ext_qos: response::ext::QoSType::RESPONSE_FINAL, ext_tstamp: None, }); } @@ -241,7 +241,7 @@ impl SyncResolve for ReplyBuilder<'_> { mapping: Mapping::Sender, }, payload: ResponseBody::Reply(zenoh::Reply { - consolidation: zenoh::Consolidation::default(), + consolidation: zenoh::Consolidation::DEFAULT, ext_unknown: vec![], payload: match kind { SampleKind::Put => ReplyBody::Put(Put { @@ -262,7 +262,7 @@ impl SyncResolve for ReplyBuilder<'_> { }), }, }), - ext_qos: response::ext::QoSType::response_default(), + ext_qos: response::ext::QoSType::RESPONSE, ext_tstamp: None, ext_respid: Some(response::ext::ResponderIdType { zid: self.query.inner.zid, @@ -292,7 +292,7 @@ impl SyncResolve for ReplyBuilder<'_> { }), code: 0, // TODO }), - ext_qos: response::ext::QoSType::response_default(), + ext_qos: response::ext::QoSType::RESPONSE, ext_tstamp: None, ext_respid: Some(response::ext::ResponderIdType { zid: self.query.inner.zid, diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 5d707e5936..d41e8c83a1 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -14,13 +14,15 @@ //! Sample primitives use crate::buffers::ZBuf; -use crate::prelude::ZenohId; -use crate::prelude::{KeyExpr, SampleKind, Value}; +use crate::prelude::{KeyExpr, Value, ZenohId}; use crate::query::Reply; use crate::time::{new_reception_timestamp, Timestamp}; #[zenoh_macros::unstable] use serde::Serialize; -use std::convert::{TryFrom, TryInto}; +use std::{ + convert::{TryFrom, TryInto}, + fmt, +}; use zenoh_protocol::core::Encoding; pub type SourceSn = u64; @@ -311,6 +313,38 @@ mod attachment { } } } + +/// The kind of a `Sample`. 
+#[repr(u8)] +#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)] +pub enum SampleKind { + /// if the `Sample` was issued by a `put` operation. + #[default] + Put = 0, + /// if the `Sample` was issued by a `delete` operation. + Delete = 1, +} + +impl fmt::Display for SampleKind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + SampleKind::Put => write!(f, "PUT"), + SampleKind::Delete => write!(f, "DELETE"), + } + } +} + +impl TryFrom for SampleKind { + type Error = u64; + fn try_from(kind: u64) -> Result { + match kind { + 0 => Ok(SampleKind::Put), + 1 => Ok(SampleKind::Delete), + _ => Err(kind), + } + } +} + #[zenoh_macros::unstable] pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 46cfd5e499..329e44e43f 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -296,7 +296,7 @@ impl<'s, 'a> SessionDeclarations<'s, 'a> for SessionRef<'a> { SubscriberBuilder { session: self.clone(), key_expr: TryIntoKeyExpr::try_into(key_expr).map_err(Into::into), - reliability: Reliability::default(), + reliability: Reliability::DEFAULT, mode: PushMode, origin: Locality::default(), handler: DefaultHandler, @@ -329,8 +329,8 @@ impl<'s, 'a> SessionDeclarations<'s, 'a> for SessionRef<'a> { PublisherBuilder { session: self.clone(), key_expr: key_expr.try_into().map_err(Into::into), - congestion_control: CongestionControl::default(), - priority: Priority::default(), + congestion_control: CongestionControl::DEFAULT, + priority: Priority::DEFAULT, destination: Locality::default(), } } @@ -775,8 +775,8 @@ impl Session { session: self, selector, scope: Ok(None), - target: QueryTarget::default(), - consolidation: QueryConsolidation::default(), + target: QueryTarget::DEFAULT, + consolidation: QueryConsolidation::DEFAULT, destination: Locality::default(), timeout, value: None, @@ -858,9 +858,9 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::declare_default(), + ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), + ext_nodeid: declare::ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: expr_id, wire_expr: WireExpr { @@ -1059,9 +1059,9 @@ impl Session { // }; primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::declare_default(), + ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), + ext_nodeid: declare::ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { id: id as u32, wire_expr: key_expr.to_wire(self).to_owned(), @@ -1124,9 +1124,9 @@ impl Session { let wire_expr = WireExpr::from(join_sub).to_owned(); drop(state); primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) ext_wire_expr: WireExprType { wire_expr }, @@ -1149,9 +1149,9 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: 
DeclareBody::UndeclareSubscriber(UndeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) ext_wire_expr: WireExprType { @@ -1205,9 +1205,9 @@ impl Session { distance: 0, }; primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::declare_default(), + ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), + ext_nodeid: declare::ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: id as u32, wire_expr: key_expr.to_owned(), @@ -1233,9 +1233,9 @@ impl Session { distance: 0, }; primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::declare_default(), + ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), + ext_nodeid: declare::ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: id as u32, wire_expr: key_expr.to_owned(), @@ -1298,9 +1298,9 @@ impl Session { distance: 0, }; primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::declare_default(), + ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), + ext_nodeid: declare::ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: 0, // @TODO use proper QueryableId (#703) wire_expr: qable_state.key_expr.clone(), @@ -1317,9 +1317,9 @@ impl Session { distance: 0, }; primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::declare_default(), + ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), + ext_nodeid: declare::ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: 0, // @TODO use proper QueryableId (#703) wire_expr: qable_state.key_expr.clone(), @@ -1333,9 +1333,9 @@ impl Session { // There are no more Queryables on the same KeyExpr. 
drop(state); primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::declare_default(), + ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), + ext_nodeid: declare::ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareQueryable(UndeclareQueryable { id: 0, // @TODO use proper QueryableId (#703) ext_wire_expr: WireExprType { @@ -1369,13 +1369,13 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::declare_default(), + ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), + ext_nodeid: declare::ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { id: id as u32, wire_expr: key_expr.to_wire(self).to_owned(), - ext_info: SubscriberInfo::default(), + ext_info: SubscriberInfo::DEFAULT, }), }); Ok(tok_state) @@ -1393,9 +1393,9 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { id: 0, // @TODO use proper SubscriberId (#703) ext_wire_expr: WireExprType { @@ -1698,10 +1698,10 @@ impl Session { primitives.send_request(Request { id: 0, // @TODO compute a proper request ID wire_expr: key_expr.to_wire(self).to_owned(), - ext_qos: ext::QoSType::request_default(), + ext_qos: ext::QoSType::REQUEST, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - ext_target: request::ext::TargetType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, + ext_target: request::ext::TargetType::DEFAULT, ext_budget: None, ext_timeout: None, payload: RequestBody::Pull(Pull { @@ -1801,9 +1801,9 @@ impl Session { primitives.send_request(Request { id: qid, wire_expr: wexpr.clone(), - ext_qos: request::ext::QoSType::request_default(), + ext_qos: request::ext::QoSType::REQUEST, ext_tstamp: None, - ext_nodeid: request::ext::NodeIdType::default(), + ext_nodeid: request::ext::NodeIdType::DEFAULT, ext_target: target, ext_budget: None, ext_timeout: Some(timeout), @@ -1959,7 +1959,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { SubscriberBuilder { session: SessionRef::Shared(self.clone()), key_expr: key_expr.try_into().map_err(Into::into), - reliability: Reliability::default(), + reliability: Reliability::DEFAULT, mode: PushMode, origin: Locality::default(), handler: DefaultHandler, @@ -2040,8 +2040,8 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { PublisherBuilder { session: SessionRef::Shared(self.clone()), key_expr: key_expr.try_into().map_err(Into::into), - congestion_control: CongestionControl::default(), - priority: Priority::default(), + congestion_control: CongestionControl::DEFAULT, + priority: Priority::DEFAULT, destination: Locality::default(), } } @@ -2247,11 +2247,6 @@ impl Primitives for Session { fn send_response(&self, msg: Response) { trace!("recv Response {:?}", msg); match msg.payload { - ResponseBody::Ack(_) => { - log::warn!( - "Received a ResponseBody::Ack, but this isn't supported yet. Dropping message." - ) - } ResponseBody::Put(_) => { log::warn!( "Received a ResponseBody::Put, but this isn't supported yet. Dropping message." 
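The hunks above and below all apply the same change: the `*_default()` constructor functions and `Default::default()` calls are replaced by associated constants (`Reliability::DEFAULT`, `Priority::DEFAULT`, `QueryConsolidation::DEFAULT`, `QoSType::DECLARE`, `QoSType::RESPONSE_FINAL`, and so on), which can be evaluated in `const` contexts. A minimal sketch of that pattern, using a hypothetical `ExampleMode` type rather than any actual zenoh definition:

// Illustrative only: a hypothetical enum following the same associated-const
// pattern this patch introduces for Priority and QueryConsolidation.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ExampleMode {
    Push,
    Pull,
}

impl ExampleMode {
    // Default value, usable in const contexts (mirrors `Priority::DEFAULT`).
    pub const DEFAULT: Self = Self::Push;
}

impl Default for ExampleMode {
    // `Default::default()` simply delegates to the associated constant.
    fn default() -> Self {
        Self::DEFAULT
    }
}

// The constant can appear in const positions, which `default()` cannot.
const MODE: ExampleMode = ExampleMode::DEFAULT;

fn main() {
    assert_eq!(ExampleMode::default(), MODE);
    let _manual = ExampleMode::Pull; // the non-default variant is still selectable
}
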
diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index 7258833d28..fe2236076f 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -25,9 +25,6 @@ use std::sync::Arc; use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; use zenoh_protocol::network::declare::{subscriber::ext::SubscriberInfo, Mode}; -/// The subscription mode. -pub use zenoh_protocol::core::SubMode; - /// The kind of reliability. pub use zenoh_protocol::core::Reliability; @@ -117,7 +114,6 @@ impl<'a> PullSubscriberInner<'a> { /// ``` /// # async_std::task::block_on(async { /// use zenoh::prelude::r#async::*; - /// use zenoh::subscriber::SubMode; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session @@ -252,12 +248,6 @@ impl Drop for SubscriberInner<'_> { #[derive(Debug, Clone, Copy)] pub struct PullMode; -impl From for SubMode { - fn from(_: PullMode) -> Self { - SubMode::Pull - } -} - impl From for Mode { fn from(_: PullMode) -> Self { Mode::Pull @@ -269,12 +259,6 @@ impl From for Mode { #[derive(Debug, Clone, Copy)] pub struct PushMode; -impl From for SubMode { - fn from(_: PushMode) -> Self { - SubMode::Push - } -} - impl From for Mode { fn from(_: PushMode) -> Self { Mode::Push @@ -712,7 +696,6 @@ impl<'a, Receiver> PullSubscriber<'a, Receiver> { /// ``` /// # async_std::task::block_on(async { /// use zenoh::prelude::r#async::*; - /// use zenoh::subscriber::SubMode; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session From cc8d4a1f93f358ef3a951e0ae0fe27c5b3e41171 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 26 Feb 2024 12:30:09 +0100 Subject: [PATCH 003/357] Allow DeclareInterest for any keyexpr (#739) * Allow to DeclareInterest for any keyexpr * Remove forgotten println --- commons/zenoh-codec/src/network/declare.rs | 41 +-- commons/zenoh-protocol/src/network/declare.rs | 283 ++++++++++++++---- zenoh/src/net/routing/mod.rs | 2 +- 3 files changed, 249 insertions(+), 77 deletions(-) diff --git a/commons/zenoh-codec/src/network/declare.rs b/commons/zenoh-codec/src/network/declare.rs index cf92b27c17..6df25a8d2a 100644 --- a/commons/zenoh-codec/src/network/declare.rs +++ b/commons/zenoh-codec/src/network/declare.rs @@ -24,6 +24,7 @@ use zenoh_protocol::{ network::{ declare::{ self, common, interest, keyexpr, queryable, subscriber, token, Declare, DeclareBody, + Interest, }, id, Mapping, }, @@ -845,24 +846,20 @@ where fn write(self, writer: &mut W, x: &interest::DeclareInterest) -> Self::Output { let interest::DeclareInterest { id, + interest: _, wire_expr, - interest, } = x; // Header - let mut header = declare::id::D_INTEREST; - if wire_expr.mapping != Mapping::DEFAULT { - header |= subscriber::flag::M; - } - if wire_expr.has_suffix() { - header |= subscriber::flag::N; - } + let header = declare::id::D_INTEREST | x.flags(); self.write(&mut *writer, header)?; // Body self.write(&mut *writer, id)?; - self.write(&mut *writer, wire_expr)?; - self.write(&mut *writer, interest.as_u8())?; + self.write(&mut *writer, x.options())?; + if let Some(we) = wire_expr.as_ref() { + self.write(&mut *writer, we)?; + } Ok(()) } @@ -894,14 +891,20 @@ where // Body let id: interest::InterestId = self.codec.read(&mut *reader)?; - let ccond = Zenoh080Condition::new(imsg::has_flag(self.header, token::flag::N)); - let mut wire_expr: WireExpr<'static> = ccond.read(&mut *reader)?; - wire_expr.mapping = if imsg::has_flag(self.header, token::flag::M) { - Mapping::Sender - } else { - Mapping::Receiver - }; 
- let interest: u8 = self.codec.read(&mut *reader)?; + let options: u8 = self.codec.read(&mut *reader)?; + let interest = Interest::from((imsg::flags(self.header), options)); + + let mut wire_expr = None; + if interest.restricted() { + let ccond = Zenoh080Condition::new(interest.named()); + let mut we: WireExpr<'static> = ccond.read(&mut *reader)?; + we.mapping = if interest.mapping() { + Mapping::Sender + } else { + Mapping::Receiver + }; + wire_expr = Some(we); + } // Extensions let has_ext = imsg::has_flag(self.header, token::flag::Z); @@ -911,8 +914,8 @@ where Ok(interest::DeclareInterest { id, + interest, wire_expr, - interest: interest.into(), }) } } diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index 1568029cc6..8164d9440d 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -18,7 +18,6 @@ use crate::{ zextz64, zextzbuf, }; use alloc::borrow::Cow; -use core::ops::BitOr; pub use interest::*; pub use keyexpr::*; pub use queryable::*; @@ -703,13 +702,18 @@ pub mod token { } pub mod interest { + use core::{ + fmt::{self, Debug}, + ops::{Add, AddAssign, Sub, SubAssign}, + }; + use super::*; pub type InterestId = u32; pub mod flag { - pub const N: u8 = 1 << 5; // 0x20 Named if N==1 then the key expr has name/suffix - pub const M: u8 = 1 << 6; // 0x40 Mapping if M==1 then key expr mapping is the one declared by the sender, else it is the one declared by the receiver + pub const C: u8 = 1 << 5; // 0x20 Current if C==1 then the interest refers to the current declarations. + pub const F: u8 = 1 << 6; // 0x40 Future if F==1 then the interest refers to the future declarations. pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow } @@ -753,21 +757,23 @@ pub mod interest { /// /// ```text /// Flags: - /// - N: Named If N==1 then the key expr has name/suffix - /// - M: Mapping if M==1 then key expr mapping is the one declared by the sender, else it is the one declared by the receiver + /// - C: Current if C==1 then the interest refers to the current declarations. + /// - F: Future if F==1 then the interest refers to the future declarations. Note that if F==0 then: + /// - Declarations SHOULD NOT be sent after the FinalInterest; + /// - UndeclareInterest SHOULD NOT be sent after the FinalInterest. /// - Z: Extension If Z==1 then at least one extension is present /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ - /// |Z|M|N| D_INT | + /// |Z|F|C| D_INT | /// +---------------+ /// ~ intst_id:z32 ~ /// +---------------+ - /// ~ key_scope:z16 ~ + /// |A|M|N|R|T|Q|S|K| (*) /// +---------------+ - /// ~ key_suffix ~ if N==1 -- + /// ~ key_scope:z16 ~ if R==1 /// +---------------+ - /// |A|F|C|X|T|Q|S|K| (*) + /// ~ key_suffix ~ if R==1 && N==1 -- /// +---------------+ /// ~ [decl_exts] ~ if Z==1 /// +---------------+ @@ -776,63 +782,141 @@ pub mod interest { /// - if S==1 then the interest refers to subscribers /// - if Q==1 then the interest refers to queryables /// - if T==1 then the interest refers to tokens - /// - if C==1 then the interest refers to the current declarations. - /// - if F==1 then the interest refers to the future declarations. Note that if F==0 then: - /// - replies SHOULD NOT be sent after the FinalInterest; - /// - UndeclareInterest SHOULD NOT be sent after the FinalInterest. + /// - if R==1 then the interest is restricted to the matching key expression, else it is for all key expressions. 
+ /// - if N==1 then the key expr has name/suffix. If R==0 then N should be set to 0. + /// - if M==1 then key expr mapping is the one declared by the sender, else it is the one declared by the receiver. + /// If R==0 then M should be set to 0. /// - if A==1 then the replies SHOULD be aggregated /// ``` #[derive(Debug, Clone, PartialEq, Eq)] pub struct DeclareInterest { pub id: InterestId, - pub wire_expr: WireExpr<'static>, pub interest: Interest, + pub wire_expr: Option>, } - #[repr(transparent)] - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct Interest(u8); + impl DeclareInterest { + pub fn flags(&self) -> u8 { + let mut interest = self.interest; + if self.interest.current() { + interest += Interest::CURRENT; + } + if self.interest.future() { + interest += Interest::FUTURE; + } + interest.flags + } + + pub fn options(&self) -> u8 { + let mut interest = self.interest; + if let Some(we) = self.wire_expr.as_ref() { + interest += Interest::RESTRICTED; + if we.has_suffix() { + interest += Interest::NAMED; + } + if let Mapping::Sender = we.mapping { + interest += Interest::MAPPING; + } + } + interest.options + } + + #[cfg(feature = "test")] + pub fn rand() -> Self { + use rand::Rng; + let mut rng = rand::thread_rng(); + + let id: InterestId = rng.gen(); + let wire_expr = rng.gen_bool(0.5).then_some(WireExpr::rand()); + let interest = Interest::rand(); + + Self { + id, + wire_expr, + interest, + } + } + } + + #[derive(Clone, Copy)] + pub struct Interest { + flags: u8, + options: u8, + } impl Interest { - pub const KEYEXPRS: Interest = Interest(1); - pub const SUBSCRIBERS: Interest = Interest(1 << 1); - pub const QUERYABLES: Interest = Interest(1 << 2); - pub const TOKENS: Interest = Interest(1 << 3); - // pub const X: Interest = Interest(1 << 4); - pub const CURRENT: Interest = Interest(1 << 5); - pub const FUTURE: Interest = Interest(1 << 6); - pub const AGGREGATE: Interest = Interest(1 << 7); + // Header + pub const CURRENT: Interest = Interest::flags(interest::flag::C); + pub const FUTURE: Interest = Interest::flags(interest::flag::F); + // Flags + pub const KEYEXPRS: Interest = Interest::options(1); + pub const SUBSCRIBERS: Interest = Interest::options(1 << 1); + pub const QUERYABLES: Interest = Interest::options(1 << 2); + pub const TOKENS: Interest = Interest::options(1 << 3); + const RESTRICTED: Interest = Interest::options(1 << 4); + const NAMED: Interest = Interest::options(1 << 5); + const MAPPING: Interest = Interest::options(1 << 6); + pub const AGGREGATE: Interest = Interest::options(1 << 7); + pub const ALL: Interest = Interest::options( + Interest::KEYEXPRS.options + | Interest::SUBSCRIBERS.options + | Interest::QUERYABLES.options + | Interest::TOKENS.options, + ); + + const fn flags(flags: u8) -> Self { + Self { flags, options: 0 } + } + + const fn options(options: u8) -> Self { + Self { flags: 0, options } + } + + pub const fn empty() -> Self { + Self { + flags: 0, + options: 0, + } + } + + pub const fn current(&self) -> bool { + imsg::has_flag(self.flags, Self::CURRENT.flags) + } + + pub const fn future(&self) -> bool { + imsg::has_flag(self.flags, Self::FUTURE.flags) + } pub const fn keyexprs(&self) -> bool { - imsg::has_flag(self.0, Self::KEYEXPRS.0) + imsg::has_flag(self.options, Self::KEYEXPRS.options) } pub const fn subscribers(&self) -> bool { - imsg::has_flag(self.0, Self::SUBSCRIBERS.0) + imsg::has_flag(self.options, Self::SUBSCRIBERS.options) } pub const fn queryables(&self) -> bool { - imsg::has_flag(self.0, Self::QUERYABLES.0) + imsg::has_flag(self.options, 
Self::QUERYABLES.options) } pub const fn tokens(&self) -> bool { - imsg::has_flag(self.0, Self::TOKENS.0) + imsg::has_flag(self.options, Self::TOKENS.options) } - pub const fn current(&self) -> bool { - imsg::has_flag(self.0, Self::CURRENT.0) + pub const fn restricted(&self) -> bool { + imsg::has_flag(self.options, Self::RESTRICTED.options) } - pub const fn future(&self) -> bool { - imsg::has_flag(self.0, Self::FUTURE.0) + pub const fn named(&self) -> bool { + imsg::has_flag(self.options, Self::NAMED.options) } - pub const fn aggregate(&self) -> bool { - imsg::has_flag(self.0, Self::AGGREGATE.0) + pub const fn mapping(&self) -> bool { + imsg::has_flag(self.options, Self::MAPPING.options) } - pub const fn as_u8(&self) -> u8 { - self.0 + pub const fn aggregate(&self) -> bool { + imsg::has_flag(self.options, Self::AGGREGATE.options) } #[cfg(feature = "test")] @@ -840,44 +924,129 @@ pub mod interest { use rand::Rng; let mut rng = rand::thread_rng(); - let inner: u8 = rng.gen(); + let mut s = Self::empty(); + if rng.gen_bool(0.5) { + s += Interest::CURRENT; + } + if rng.gen_bool(0.5) { + s += Interest::FUTURE; + } + if rng.gen_bool(0.5) { + s += Interest::KEYEXPRS; + } + if rng.gen_bool(0.5) { + s += Interest::SUBSCRIBERS; + } + if rng.gen_bool(0.5) { + s += Interest::TOKENS; + } + if rng.gen_bool(0.5) { + s += Interest::AGGREGATE; + } + s + } + } - Self(inner) + impl PartialEq for Interest { + fn eq(&self, other: &Self) -> bool { + self.current() == other.current() + && self.future() == other.future() + && self.keyexprs() == other.keyexprs() + && self.subscribers() == other.subscribers() + && self.queryables() == other.queryables() + && self.tokens() == other.tokens() + && self.aggregate() == other.aggregate() } } - impl BitOr for Interest { + impl Debug for Interest { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Interest {{ ")?; + if self.current() { + write!(f, "C:Y, ")?; + } else { + write!(f, "C:N, ")?; + } + if self.future() { + write!(f, "F:Y, ")?; + } else { + write!(f, "F:N, ")?; + } + if self.keyexprs() { + write!(f, "K:Y, ")?; + } else { + write!(f, "K:N, ")?; + } + if self.subscribers() { + write!(f, "S:Y, ")?; + } else { + write!(f, "S:N, ")?; + } + if self.queryables() { + write!(f, "Q:Y, ")?; + } else { + write!(f, "Q:N, ")?; + } + if self.tokens() { + write!(f, "T:Y, ")?; + } else { + write!(f, "T:N, ")?; + } + if self.aggregate() { + write!(f, "A:Y")?; + } else { + write!(f, "A:N")?; + } + write!(f, " }}")?; + Ok(()) + } + } + + impl Eq for Interest {} + + impl Add for Interest { type Output = Self; - fn bitor(self, rhs: Self) -> Self::Output { - Self(self.0 | rhs.0) + fn add(self, rhs: Self) -> Self::Output { + Self { + flags: self.flags | rhs.flags, + options: self.options | rhs.options, + } } } - impl From for Interest { - fn from(v: u8) -> Self { - Self(v) + impl AddAssign for Interest { + fn add_assign(&mut self, rhs: Self) { + self.flags |= rhs.flags; + self.options |= rhs.options; } } - impl DeclareInterest { - #[cfg(feature = "test")] - pub fn rand() -> Self { - use rand::Rng; - let mut rng = rand::thread_rng(); - - let id: InterestId = rng.gen(); - let wire_expr = WireExpr::rand(); - let interest = Interest::rand(); + impl Sub for Interest { + type Output = Self; + fn sub(self, rhs: Self) -> Self::Output { Self { - id, - wire_expr, - interest, + flags: self.flags & !rhs.flags, + options: self.options & !rhs.options, } } } + impl SubAssign for Interest { + fn sub_assign(&mut self, rhs: Self) { + self.flags &= !rhs.flags; + self.options &= 
!rhs.options; + } + } + + impl From<(u8, u8)> for Interest { + fn from(value: (u8, u8)) -> Self { + let (flags, options) = value; + Self { flags, options } + } + } + /// ```text /// Flags: /// - X: Reserved diff --git a/zenoh/src/net/routing/mod.rs b/zenoh/src/net/routing/mod.rs index 0b069c1337..8147cca31c 100644 --- a/zenoh/src/net/routing/mod.rs +++ b/zenoh/src/net/routing/mod.rs @@ -115,7 +115,7 @@ impl RoutingContext { DeclareBody::UndeclareQueryable(m) => Some(&m.ext_wire_expr.wire_expr), DeclareBody::DeclareToken(m) => Some(&m.wire_expr), DeclareBody::UndeclareToken(m) => Some(&m.ext_wire_expr.wire_expr), - DeclareBody::DeclareInterest(m) => Some(&m.wire_expr), + DeclareBody::DeclareInterest(m) => m.wire_expr.as_ref(), DeclareBody::FinalInterest(_) => None, DeclareBody::UndeclareInterest(m) => Some(&m.ext_wire_expr.wire_expr), }, From 24e5ef573f3454f7bfea2eb86467b28113ffc6dc Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 28 Feb 2024 10:31:45 +0100 Subject: [PATCH 004/357] ConsolidationMode can be Auto (#738) * ConsolidationMode rework * Fix QueryConsolidation::DEFAULT --- commons/zenoh-codec/src/zenoh/query.rs | 2 - commons/zenoh-protocol/src/zenoh/query.rs | 16 +++----- zenoh/src/query.rs | 45 ++++------------------- zenoh/src/session.rs | 10 ++--- 4 files changed, 18 insertions(+), 55 deletions(-) diff --git a/commons/zenoh-codec/src/zenoh/query.rs b/commons/zenoh-codec/src/zenoh/query.rs index 55f25cd5ea..efac7b5671 100644 --- a/commons/zenoh-codec/src/zenoh/query.rs +++ b/commons/zenoh-codec/src/zenoh/query.rs @@ -39,7 +39,6 @@ where Consolidation::None => 1, Consolidation::Monotonic => 2, Consolidation::Latest => 3, - Consolidation::Unique => 4, }; self.write(&mut *writer, v) } @@ -58,7 +57,6 @@ where 1 => Consolidation::None, 2 => Consolidation::Monotonic, 3 => Consolidation::Latest, - 4 => Consolidation::Unique, _ => Consolidation::Auto, // Fallback on Auto if Consolidation is unknown }; Ok(c) diff --git a/commons/zenoh-protocol/src/zenoh/query.rs b/commons/zenoh-protocol/src/zenoh/query.rs index ac53b963f5..f1baaebe20 100644 --- a/commons/zenoh-protocol/src/zenoh/query.rs +++ b/commons/zenoh-protocol/src/zenoh/query.rs @@ -33,8 +33,8 @@ pub enum Consolidation { Monotonic, /// Holds back samples to only send the set of samples that had the highest timestamp for their key. Latest, - /// Remove the duplicates of any samples based on the their timestamp. - Unique, + // Remove the duplicates of any samples based on the their timestamp. + // Unique, } impl Consolidation { @@ -45,15 +45,9 @@ impl Consolidation { use rand::prelude::SliceRandom; let mut rng = rand::thread_rng(); - *[ - Self::None, - Self::Monotonic, - Self::Latest, - Self::Unique, - Self::Auto, - ] - .choose(&mut rng) - .unwrap() + *[Self::None, Self::Monotonic, Self::Latest, Self::Auto] + .choose(&mut rng) + .unwrap() } } diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 7a7a867cd8..a848913c7a 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -22,38 +22,13 @@ use std::collections::HashMap; use std::future::Ready; use std::time::Duration; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; -use zenoh_protocol::zenoh::query::Consolidation; use zenoh_result::ZResult; /// The [`Queryable`](crate::queryable::Queryable)s that should be target of a [`get`](Session::get). pub type QueryTarget = zenoh_protocol::network::request::ext::TargetType; /// The kind of consolidation. 
-#[derive(Debug, Clone, PartialEq, Eq, Copy)] -pub enum ConsolidationMode { - /// No consolidation applied: multiple samples may be received for the same key-timestamp. - None, - /// Monotonic consolidation immediately forwards samples, except if one with an equal or more recent timestamp - /// has already been sent with the same key. - /// - /// This optimizes latency while potentially reducing bandwidth. - /// - /// Note that this doesn't cause re-ordering, but drops the samples for which a more recent timestamp has already - /// been observed with the same key. - Monotonic, - /// Holds back samples to only send the set of samples that had the highest timestamp for their key. - Latest, -} - -impl From for Consolidation { - fn from(val: ConsolidationMode) -> Self { - match val { - ConsolidationMode::None => Consolidation::None, - ConsolidationMode::Monotonic => Consolidation::Monotonic, - ConsolidationMode::Latest => Consolidation::Latest, - } - } -} +pub type ConsolidationMode = zenoh_protocol::zenoh::query::Consolidation; /// The operation: either manual or automatic. #[derive(Clone, Copy, Debug, PartialEq, Eq)] @@ -65,30 +40,26 @@ pub enum Mode { /// The replies consolidation strategy to apply on replies to a [`get`](Session::get). #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct QueryConsolidation { - pub(crate) mode: Mode, + pub(crate) mode: ConsolidationMode, } impl QueryConsolidation { pub const DEFAULT: Self = Self::AUTO; /// Automatic query consolidation strategy selection. - pub const AUTO: Self = Self { mode: Mode::Auto }; + pub const AUTO: Self = Self { + mode: ConsolidationMode::Auto, + }; pub(crate) const fn from_mode(mode: ConsolidationMode) -> Self { - Self { - mode: Mode::Manual(mode), - } + Self { mode } } /// Returns the requested [`ConsolidationMode`]. 
- pub fn mode(&self) -> Mode { + pub fn mode(&self) -> ConsolidationMode { self.mode } } -impl From> for QueryConsolidation { - fn from(mode: Mode) -> Self { - Self { mode } - } -} + impl From for QueryConsolidation { fn from(mode: ConsolidationMode) -> Self { Self::from_mode(mode) diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 329e44e43f..efb7756ba4 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -1728,14 +1728,14 @@ impl Session { log::trace!("get({}, {:?}, {:?})", selector, target, consolidation); let mut state = zwrite!(self.state); let consolidation = match consolidation.mode { - Mode::Auto => { + ConsolidationMode::Auto => { if selector.decode().any(|(k, _)| k.as_ref() == TIME_RANGE_KEY) { ConsolidationMode::None } else { ConsolidationMode::Latest } } - Mode::Manual(mode) => mode, + mode => mode, }; let qid = state.qid_counter.fetch_add(1, Ordering::SeqCst); let nb_final = match destination { @@ -1808,7 +1808,7 @@ impl Session { ext_budget: None, ext_timeout: Some(timeout), payload: RequestBody::Query(zenoh_protocol::zenoh::Query { - consolidation: consolidation.into(), + consolidation, parameters: selector.parameters().to_string(), ext_sinfo: None, ext_body: value.as_ref().map(|v| query::ext::QueryBodyType { @@ -1829,7 +1829,7 @@ impl Session { selector.parameters(), qid, target, - consolidation.into(), + consolidation, value.as_ref().map(|v| query::ext::QueryBodyType { #[cfg(feature = "shared-memory")] ext_shm: None, @@ -2441,7 +2441,7 @@ impl Primitives for Session { } } } - ConsolidationMode::Latest => { + Consolidation::Auto | ConsolidationMode::Latest => { match query.replies.as_ref().unwrap().get( new_reply.sample.as_ref().unwrap().key_expr.as_keyexpr(), ) { From e41f768b2b32d0893839807d0c2208e96d71709a Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Wed, 28 Feb 2024 12:14:02 +0100 Subject: [PATCH 005/357] Fix bug building reply --- zenoh/src/queryable.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index d0ce99b512..c802c29689 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -214,8 +214,6 @@ impl SyncResolve for ReplyBuilder<'_> { let mut ext_attachment = None; #[cfg(feature = "unstable")] { - data_info.source_id = source_info.source_id; - data_info.source_sn = source_info.source_sn; if let Some(attachment) = attachment { ext_attachment = Some(attachment.into()); } @@ -224,6 +222,11 @@ impl SyncResolve for ReplyBuilder<'_> { }}; } + #[cfg(feature = "unstable")] + { + data_info.source_id = source_info.source_id; + data_info.source_sn = source_info.source_sn; + } let ext_sinfo = if data_info.source_id.is_some() || data_info.source_sn.is_some() { Some(zenoh::put::ext::SourceInfoType { zid: data_info.source_id.unwrap_or_default(), From a8cdbbe802b0c307961c5e731d27cb5cf835e0f2 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 12 Mar 2024 09:39:25 +0100 Subject: [PATCH 006/357] Revised Encoding API and wire format (#764) * Remove KnownEncoding enum and replace it with consts * Fix no_std * Encoding encoder (#746) * Encoding contains a mapping * Add forgotten file * Provide default encoder * Refine encoder * Fix encoding codec * Do not change the protocol representation * Accept Cow<'static, str> in EncodingMapping trait * Improve Value::Display * Fix doctests * Bump EncodingPrefix to u16. Add IANA encoding mapping. 
* Improve doc * Remove generic from Encoding::starts_with * Remove Display impl for Encoding * Improve doc * Improve doc * Improve encoding parsing * Improve comments * Improve doc * Encoding suffix bitflag * Encoder/Decoder traits take self * Rename encoding() to with_encoding() * Make Value, ZBuf, SingleOrVec empty() const * Derive Encoder for &mut u* and i* * Integers are encoded as le_bytes are not as string * Integers are encoded as le_bytes are not as string * Fix doctest * Refine default encoding mapping * IANA mapping starts from 1024 * Move IANA encoding to zneoh-ext * Improve docs * Improve DefaultEncoding * Add From for ZBuf * Remove Value and Sample Display trait impl * Encoder/Decoder operate on ZBuf * Payload type. Put takes Into. * Flat sample Value to Payload and Encoding fields * Add payload.rs * Polish up Publication * Add serde_cbor::Value as supported DefaultSerializer supported types * Add serde_pickle::Value as supported DefaultSerializer supported types * Add serde_yaml::Value as supported DefaultSerializer supported types * Impl TryFrom for Payload * Remove encoding folder * Polish up Value and Encoding * Fix doctest * Fix some erroneous prelude usage * Fix wrong typedef in publication * Encoding Id and Schema * Encoding Id and Schema * Fix encoding w_len * Wrapper type for Encoding * Add forgotten file * Expand Encoding consts and add doc * Polish doc * Polishing up Payload * Add EncodingMapping trait * Improve docs * Add deserialize in examples * Use deserialize in examples * Remove encoding from zenoh-ext * Add repr(transparent) to Payload * Improve encoding doc --- Cargo.lock | 140 ++- Cargo.toml | 7 +- commons/zenoh-buffers/src/lib.rs | 3 +- commons/zenoh-buffers/src/zbuf.rs | 9 +- commons/zenoh-buffers/src/zslice.rs | 31 +- commons/zenoh-codec/benches/codec.rs | 14 +- commons/zenoh-codec/src/core/encoding.rs | 44 +- commons/zenoh-codec/src/zenoh/put.rs | 6 +- .../zenoh-collections/src/single_or_vec.rs | 10 +- commons/zenoh-protocol/src/core/cowstr.rs | 2 +- commons/zenoh-protocol/src/core/encoding.rs | 292 +----- commons/zenoh-protocol/src/core/mod.rs | 4 +- commons/zenoh-protocol/src/zenoh/mod.rs | 4 +- examples/examples/z_get.rs | 25 +- examples/examples/z_get_liveliness.rs | 9 +- examples/examples/z_ping.rs | 2 +- examples/examples/z_pong.rs | 2 +- examples/examples/z_pub_thr.rs | 2 +- examples/examples/z_pull.rs | 6 +- examples/examples/z_queryable.rs | 5 +- examples/examples/z_storage.rs | 4 +- examples/examples/z_sub.rs | 5 +- io/zenoh-transport/src/common/batch.rs | 4 +- io/zenoh-transport/src/common/pipeline.rs | 6 +- io/zenoh-transport/src/multicast/link.rs | 2 +- io/zenoh-transport/src/unicast/link.rs | 2 +- .../src/unicast/lowlatency/link.rs | 4 +- .../tests/multicast_compression.rs | 2 +- .../tests/multicast_transport.rs | 2 +- .../tests/unicast_compression.rs | 2 +- .../tests/unicast_concurrent.rs | 4 +- .../tests/unicast_defragmentation.rs | 2 +- .../tests/unicast_intermittent.rs | 2 +- .../tests/unicast_priorities.rs | 2 +- io/zenoh-transport/tests/unicast_shm.rs | 4 +- .../tests/unicast_simultaneous.rs | 2 +- io/zenoh-transport/tests/unicast_transport.rs | 2 +- plugins/zenoh-plugin-example/src/lib.rs | 3 +- .../zenoh-plugin-rest/examples/z_serve_sse.rs | 6 +- plugins/zenoh-plugin-rest/src/lib.rs | 71 +- .../src/replica/align_queryable.rs | 15 +- .../src/replica/aligner.rs | 29 +- .../src/replica/mod.rs | 5 +- .../src/replica/storage.rs | 43 +- .../tests/operations.rs | 7 +- .../tests/wildcard.rs | 11 +- zenoh-ext/Cargo.toml | 3 + 
zenoh-ext/examples/z_query_sub.rs | 8 +- zenoh-ext/src/group.rs | 4 +- zenoh-ext/src/lib.rs | 13 + zenoh-ext/src/querying_subscriber.rs | 48 +- zenoh-ext/src/subscriber_ext.rs | 12 +- zenoh/Cargo.toml | 4 + zenoh/src/admin.rs | 26 +- zenoh/src/encoding.rs | 850 ++++++++++++++++++ zenoh/src/lib.rs | 25 +- zenoh/src/liveliness.rs | 13 +- zenoh/src/net/routing/dispatcher/queries.rs | 2 +- zenoh/src/net/runtime/adminspace.rs | 87 +- zenoh/src/net/tests/tables.rs | 10 +- zenoh/src/payload.rs | 673 ++++++++++++++ zenoh/src/prelude.rs | 20 +- zenoh/src/publication.rs | 110 ++- zenoh/src/queryable.rs | 19 +- zenoh/src/sample.rs | 115 +-- zenoh/src/session.rs | 53 +- zenoh/src/subscriber.rs | 18 +- zenoh/src/value.rs | 696 +------------- zenoh/tests/attachments.rs | 2 +- zenoh/tests/routing.rs | 8 +- zenoh/tests/session.rs | 6 +- zenoh/tests/unicity.rs | 6 +- 72 files changed, 2255 insertions(+), 1434 deletions(-) create mode 100644 zenoh/src/encoding.rs create mode 100644 zenoh/src/payload.rs diff --git a/Cargo.lock b/Cargo.lock index 1d5fab2365..53f2600071 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -446,7 +446,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -739,7 +739,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -1025,7 +1025,7 @@ checksum = "d150dea618e920167e5973d70ae6ece4385b7164e0d799fe7c122dd0a5d912ad" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -1337,7 +1337,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -1703,6 +1703,12 @@ dependencies = [ "nom", ] +[[package]] +name = "iter-read" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c397ca3ea05ad509c4ec451fea28b4771236a376ca1c69fd5143aae0cf8f93c4" + [[package]] name = "itertools" version = "0.10.5" @@ -2110,9 +2116,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" +checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ "autocfg", "libm", @@ -2267,7 +2273,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -2291,6 +2297,48 @@ dependencies = [ "indexmap", ] +[[package]] +name = "phf" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +dependencies = [ + "phf_macros", + "phf_shared", +] + +[[package]] +name = "phf_generator" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" +dependencies = [ + "phf_shared", + "rand 0.8.5", +] + +[[package]] +name = "phf_macros" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3444646e286606587e49f3bcf1679b8cef1dc2c5ecc29ddacaffc305180d464b" +dependencies = [ + "phf_generator", + "phf_shared", + "proc-macro2", + "quote", + "syn 2.0.52", +] + +[[package]] +name = "phf_shared" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +dependencies = [ + "siphasher", +] + [[package]] name = "pin-project" version = "1.1.3" @@ -2308,7 +2356,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -2451,9 +2499,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.67" +version = "1.0.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" +checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" dependencies = [ "unicode-ident", ] @@ -2508,9 +2556,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.33" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" dependencies = [ "proc-macro2", ] @@ -3005,22 +3053,45 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.188" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" +checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" dependencies = [ "serde_derive", ] +[[package]] +name = "serde-pickle" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c762ad136a26407c6a80825813600ceeab5e613660d93d79a41f0ec877171e71" +dependencies = [ + "byteorder", + "iter-read", + "num-bigint", + "num-traits", + "serde", +] + +[[package]] +name = "serde_cbor" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" +dependencies = [ + "half", + "serde", +] + [[package]] name = "serde_derive" -version = "1.0.188" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" +checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -3045,9 +3116,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.107" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" +checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" dependencies = [ "itoa", "ryu", @@ -3227,6 +3298,12 @@ dependencies = [ "event-listener 2.5.3", ] +[[package]] +name = "siphasher" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" + [[package]] name = "slab" version = "0.4.9" @@ -3456,9 +3533,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.33" +version = "2.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9caece70c63bfba29ec2fed841a09851b14a235c60010fa4de58089b6c025668" +checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" dependencies = [ "proc-macro2", "quote", @@ 
-3491,7 +3568,7 @@ checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -3641,7 +3718,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -3690,7 +3767,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -3995,7 +4072,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", "wasm-bindgen-shared", ] @@ -4029,7 +4106,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4325,11 +4402,15 @@ dependencies = [ "ordered-float", "paste", "petgraph", + "phf", "rand 0.8.5", "regex", "rustc_version 0.4.0", "serde", + "serde-pickle", + "serde_cbor", "serde_json", + "serde_yaml", "socket2 0.5.4", "stop-token", "uhlc", @@ -4467,7 +4548,10 @@ dependencies = [ "flume", "futures", "log", + "phf", "serde", + "serde_cbor", + "serde_json", "zenoh", "zenoh-core", "zenoh-macros", @@ -4695,7 +4779,7 @@ version = "0.11.0-dev" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", "zenoh-keyexpr", ] @@ -4939,7 +5023,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index d7210ebc0e..9830b56490 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -110,11 +110,12 @@ libloading = "0.8" log = "0.4.17" lz4_flex = "0.11" nix = { version = "0.27", features = ["fs"] } -num_cpus = "1.15.0" +num_cpus = "1.16.0" ordered-float = "4.1.1" panic-message = "0.3.0" paste = "1.0.12" petgraph = "0.6.3" +phf = { version = "0.11.2", features = ["macros"] } pnet = "0.34" pnet_datalink = "0.34" proc-macro2 = "1.0.51" @@ -136,7 +137,9 @@ secrecy = { version = "0.8.0", features = ["serde", "alloc"] } serde = { version = "1.0.154", default-features = false, features = [ "derive", ] } # Default features are disabled due to usage in no_std crates -serde_json = "1.0.94" +serde_cbor = "0.11.2" +serde_json = "1.0.114" +serde-pickle = "1.1.1" serde_yaml = "0.9.19" sha3 = "0.10.6" shared_memory = "0.12.4" diff --git a/commons/zenoh-buffers/src/lib.rs b/commons/zenoh-buffers/src/lib.rs index 4dee599ea7..eae7f1715c 100644 --- a/commons/zenoh-buffers/src/lib.rs +++ b/commons/zenoh-buffers/src/lib.rs @@ -101,7 +101,8 @@ pub mod buffer { let mut slices = self.slices(); match slices.len() { 0 => Cow::Borrowed(b""), - 1 => Cow::Borrowed(slices.next().unwrap()), + // SAFETY: it's safe to use unwrap_unchecked() beacuse we are explicitly checking the length is 1. 
+ 1 => Cow::Borrowed(unsafe { slices.next().unwrap_unchecked() }), _ => Cow::Owned(slices.fold(Vec::new(), |mut acc, it| { acc.extend(it); acc diff --git a/commons/zenoh-buffers/src/zbuf.rs b/commons/zenoh-buffers/src/zbuf.rs index 1365397966..fd86f454af 100644 --- a/commons/zenoh-buffers/src/zbuf.rs +++ b/commons/zenoh-buffers/src/zbuf.rs @@ -34,8 +34,10 @@ pub struct ZBuf { impl ZBuf { #[must_use] - pub fn empty() -> Self { - Self::default() + pub const fn empty() -> Self { + Self { + slices: SingleOrVec::empty(), + } } pub fn clear(&mut self) { @@ -72,6 +74,7 @@ impl ZBuf { } self.insert(start, replacement); } + fn remove(&mut self, mut start: usize, mut end: usize) { assert!(start <= end); assert!(end <= self.len()); @@ -100,6 +103,7 @@ impl ZBuf { let drain_end = end_slice_idx + (end_slice.start >= end_slice.end) as usize; self.slices.drain(drain_start..drain_end); } + fn insert(&mut self, mut at: usize, slice: &[u8]) { if slice.is_empty() { return; @@ -206,6 +210,7 @@ where zbuf } } + // Reader #[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] pub struct ZBufPos { diff --git a/commons/zenoh-buffers/src/zslice.rs b/commons/zenoh-buffers/src/zslice.rs index e53e6f3334..c15cbc6828 100644 --- a/commons/zenoh-buffers/src/zslice.rs +++ b/commons/zenoh-buffers/src/zslice.rs @@ -92,24 +92,41 @@ pub struct ZSlice { } impl ZSlice { + #[deprecated(note = "use `new` instead")] pub fn make( buf: Arc, start: usize, end: usize, + ) -> Result> { + Self::new(buf, start, end) + } + + pub fn new( + buf: Arc, + start: usize, + end: usize, ) -> Result> { if start <= end && end <= buf.as_slice().len() { - Ok(ZSlice { - buf, - start, - end, - #[cfg(feature = "shared-memory")] - kind: ZSliceKind::Raw, - }) + // unsafe: this operation is safe because we just checked the slice boundaries + Ok(unsafe { ZSlice::new_unchecked(buf, start, end) }) } else { Err(buf) } } + /// # Safety + /// This function does not verify wether the `start` and `end` indexes are within the buffer boundaries. + /// If a [`ZSlice`] is built via this constructor, a later access may panic if `start` and `end` indexes are out-of-bound. 
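    // The checked constructor above is the normal entry point; this unchecked variant is
    // meant for callers that have already validated the bounds, exactly as `new()` does
    // before delegating to it. A minimal usage sketch, assuming `buf` is the same Arc-ed
    // buffer type taken by `new()` and that `zerror!` is in scope as in the transport call
    // sites further down in this patch:
    //
    // // Checked: hands the buffer back as an error if start/end fall outside of it.
    // let zslice = ZSlice::new(buf.clone(), 0, n).map_err(|_| zerror!("Out of bounds"))?;
    //
    // // Unchecked: the caller guarantees start <= end <= buf.as_slice().len().
    // let zslice = unsafe { ZSlice::new_unchecked(buf, 0, n) };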
+ pub unsafe fn new_unchecked(buf: Arc, start: usize, end: usize) -> Self { + ZSlice { + buf, + start, + end, + #[cfg(feature = "shared-memory")] + kind: ZSliceKind::Raw, + } + } + #[inline] #[must_use] pub fn downcast_ref(&self) -> Option<&T> diff --git a/commons/zenoh-codec/benches/codec.rs b/commons/zenoh-codec/benches/codec.rs index 34c9313a7f..d897038f91 100644 --- a/commons/zenoh-codec/benches/codec.rs +++ b/commons/zenoh-codec/benches/codec.rs @@ -87,7 +87,7 @@ fn criterion_benchmark(c: &mut Criterion) { ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -133,7 +133,7 @@ fn criterion_benchmark(c: &mut Criterion) { ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -174,7 +174,7 @@ fn criterion_benchmark(c: &mut Criterion) { ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -215,7 +215,7 @@ fn criterion_benchmark(c: &mut Criterion) { ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -243,7 +243,7 @@ fn criterion_benchmark(c: &mut Criterion) { ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -282,7 +282,7 @@ fn criterion_benchmark(c: &mut Criterion) { ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -305,7 +305,7 @@ fn criterion_benchmark(c: &mut Criterion) { let mut idx = 0; while idx < zslice.len() { let len = (zslice.len() - idx).min(chunk); - zbuf.push_zslice(ZSlice::make(buff.clone(), idx, idx + len).unwrap()); + zbuf.push_zslice(ZSlice::new(buff.clone(), idx, idx + len).unwrap()); idx += len; } diff --git a/commons/zenoh-codec/src/core/encoding.rs b/commons/zenoh-codec/src/core/encoding.rs index 478bcf1cd8..cfbe0084ba 100644 --- a/commons/zenoh-codec/src/core/encoding.rs +++ b/commons/zenoh-codec/src/core/encoding.rs @@ -12,16 +12,22 @@ // ZettaScale Zenoh Team, // use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; -use alloc::string::String; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; -use zenoh_protocol::core::Encoding; +use zenoh_protocol::{ + common::imsg, + core::encoding::{flag, Encoding, EncodingId}, +}; impl LCodec<&Encoding> for Zenoh080 { fn w_len(self, x: &Encoding) -> usize { - 1 + self.w_len(x.suffix()) + let mut len = self.w_len((x.id as u32) << 1); + if let Some(schema) = x.schema.as_ref() { + len += self.w_len(schema.as_slice()); + } + len } } @@ -32,9 +38,17 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &Encoding) -> Self::Output { - let zodec = Zenoh080Bounded::::new(); - zodec.write(&mut *writer, *x.prefix() as u8)?; - zodec.write(&mut *writer, x.suffix())?; + let mut id = (x.id as u32) << 1; + + if 
x.schema.is_some() { + id |= flag::S; + } + let zodec = Zenoh080Bounded::::new(); + zodec.write(&mut *writer, id)?; + if let Some(schema) = x.schema.as_ref() { + let zodec = Zenoh080Bounded::::new(); + zodec.write(&mut *writer, schema)?; + } Ok(()) } } @@ -46,10 +60,20 @@ where type Error = DidntRead; fn read(self, reader: &mut R) -> Result { - let zodec = Zenoh080Bounded::::new(); - let prefix: u8 = zodec.read(&mut *reader)?; - let suffix: String = zodec.read(&mut *reader)?; - let encoding = Encoding::new(prefix, suffix).map_err(|_| DidntRead)?; + let zodec = Zenoh080Bounded::::new(); + let id: u32 = zodec.read(&mut *reader)?; + let (id, has_suffix) = ( + (id >> 1) as EncodingId, + imsg::has_flag(id as u8, flag::S as u8), + ); + + let mut schema = None; + if has_suffix { + let zodec = Zenoh080Bounded::::new(); + schema = Some(zodec.read(&mut *reader)?); + } + + let encoding = Encoding { id, schema }; Ok(encoding) } } diff --git a/commons/zenoh-codec/src/zenoh/put.rs b/commons/zenoh-codec/src/zenoh/put.rs index 4f50be4872..776b47245f 100644 --- a/commons/zenoh-codec/src/zenoh/put.rs +++ b/commons/zenoh-codec/src/zenoh/put.rs @@ -54,7 +54,7 @@ where if timestamp.is_some() { header |= flag::T; } - if encoding != &Encoding::DEFAULT { + if encoding != &Encoding::empty() { header |= flag::E; } let mut n_exts = (ext_sinfo.is_some()) as u8 @@ -73,7 +73,7 @@ where if let Some(ts) = timestamp.as_ref() { self.write(&mut *writer, ts)?; } - if encoding != &Encoding::DEFAULT { + if encoding != &Encoding::empty() { self.write(&mut *writer, encoding)?; } @@ -143,7 +143,7 @@ where timestamp = Some(self.codec.read(&mut *reader)?); } - let mut encoding = Encoding::DEFAULT; + let mut encoding = Encoding::empty(); if imsg::has_flag(self.header, flag::E) { encoding = self.codec.read(&mut *reader)?; } diff --git a/commons/zenoh-collections/src/single_or_vec.rs b/commons/zenoh-collections/src/single_or_vec.rs index c68ac6d8ff..ceb43e4025 100644 --- a/commons/zenoh-collections/src/single_or_vec.rs +++ b/commons/zenoh-collections/src/single_or_vec.rs @@ -30,6 +30,10 @@ enum SingleOrVecInner { } impl SingleOrVecInner { + const fn empty() -> Self { + SingleOrVecInner::Vec(Vec::new()) + } + fn push(&mut self, value: T) { match self { SingleOrVecInner::Vec(vec) if vec.capacity() == 0 => *self = Self::Single(value), @@ -53,7 +57,7 @@ where impl Default for SingleOrVecInner { fn default() -> Self { - SingleOrVecInner::Vec(Vec::new()) + Self::empty() } } @@ -88,6 +92,10 @@ where pub struct SingleOrVec(SingleOrVecInner); impl SingleOrVec { + pub const fn empty() -> Self { + Self(SingleOrVecInner::empty()) + } + pub fn push(&mut self, value: T) { self.0.push(value); } diff --git a/commons/zenoh-protocol/src/core/cowstr.rs b/commons/zenoh-protocol/src/core/cowstr.rs index 33dac4524f..209d020f40 100644 --- a/commons/zenoh-protocol/src/core/cowstr.rs +++ b/commons/zenoh-protocol/src/core/cowstr.rs @@ -21,7 +21,7 @@ enum CowStrInner<'a> { } pub struct CowStr<'a>(CowStrInner<'a>); impl<'a> CowStr<'a> { - pub(crate) fn borrowed(s: &'a str) -> Self { + pub(crate) const fn borrowed(s: &'a str) -> Self { Self(CowStrInner::Borrowed(s)) } pub fn as_str(&self) -> &str { diff --git a/commons/zenoh-protocol/src/core/encoding.rs b/commons/zenoh-protocol/src/core/encoding.rs index b3abae8aae..9b9aa5bf2f 100644 --- a/commons/zenoh-protocol/src/core/encoding.rs +++ b/commons/zenoh-protocol/src/core/encoding.rs @@ -11,282 +11,68 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::core::CowStr; -use alloc::{borrow::Cow, 
string::String}; -use core::{ - convert::TryFrom, - fmt::{self, Debug}, - mem, -}; -use zenoh_result::{bail, zerror, ZError, ZResult}; - -mod consts { - pub(super) const MIMES: [&str; 21] = [ - /* 0 */ "", - /* 1 */ "application/octet-stream", - /* 2 */ "application/custom", // non iana standard - /* 3 */ "text/plain", - /* 4 */ "application/properties", // non iana standard - /* 5 */ "application/json", // if not readable from casual users - /* 6 */ "application/sql", - /* 7 */ "application/integer", // non iana standard - /* 8 */ "application/float", // non iana standard - /* 9 */ - "application/xml", // if not readable from casual users (RFC 3023, sec 3) - /* 10 */ "application/xhtml+xml", - /* 11 */ "application/x-www-form-urlencoded", - /* 12 */ "text/json", // non iana standard - if readable from casual users - /* 13 */ "text/html", - /* 14 */ "text/xml", // if readable from casual users (RFC 3023, section 3) - /* 15 */ "text/css", - /* 16 */ "text/csv", - /* 17 */ "text/javascript", - /* 18 */ "image/jpeg", - /* 19 */ "image/png", - /* 20 */ "image/gif", - ]; -} - -#[repr(u8)] -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum KnownEncoding { - Empty = 0, - AppOctetStream = 1, - AppCustom = 2, - TextPlain = 3, - AppProperties = 4, - AppJson = 5, - AppSql = 6, - AppInteger = 7, - AppFloat = 8, - AppXml = 9, - AppXhtmlXml = 10, - AppXWwwFormUrlencoded = 11, - TextJson = 12, - TextHtml = 13, - TextXml = 14, - TextCss = 15, - TextCsv = 16, - TextJavascript = 17, - ImageJpeg = 18, - ImagePng = 19, - ImageGif = 20, -} - -impl From for u8 { - fn from(val: KnownEncoding) -> Self { - val as u8 - } -} - -impl From for &str { - fn from(val: KnownEncoding) -> Self { - consts::MIMES[u8::from(val) as usize] - } -} - -impl TryFrom for KnownEncoding { - type Error = ZError; - fn try_from(value: u8) -> Result { - if value < consts::MIMES.len() as u8 + 1 { - Ok(unsafe { mem::transmute(value) }) - } else { - Err(zerror!("Unknown encoding")) - } - } -} - -impl AsRef for KnownEncoding { - fn as_ref(&self) -> &str { - consts::MIMES[u8::from(*self) as usize] - } -} - -/// The encoding of a zenoh `zenoh::Value`. -/// -/// A zenoh encoding is a HTTP Mime type represented, for wire efficiency, -/// as an integer prefix (that maps to a string) and a string suffix. +use core::fmt::Debug; +use zenoh_buffers::ZSlice; + +pub type EncodingId = u16; + +/// [`Encoding`] is a metadata that indicates how the data payload should be interpreted. +/// For wire-efficiency and extensibility purposes, Zenoh defines an [`Encoding`] as +/// composed of an unsigned integer prefix and a string suffix. The actual meaning of the +/// prefix and suffix are out-of-scope of the protocol definition. Therefore, Zenoh does not +/// impose any encoding mapping and users are free to use any mapping they like. +/// Nevertheless, it is worth highlighting that Zenoh still provides a default mapping as part +/// of the API as per user convenience. That mapping has no impact on the Zenoh protocol definition. 
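// A minimal sketch of how an application-defined mapping could fill this struct in practice
// (the id value and schema string are arbitrary illustrations, and the schema bytes are
// turned into a ZSlice with the same `Vec<u8>`-to-ZSlice conversion used elsewhere in this
// patch):
//
// let encoding = Encoding {
//     id: 42,                                           // meaning chosen by the application
//     schema: Some(b"application/json; charset=utf-8".to_vec().into()),
// };
// assert_ne!(encoding, Encoding::empty());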
#[derive(Clone, Debug, PartialEq, Eq)] -pub enum Encoding { - Exact(KnownEncoding), - WithSuffix(KnownEncoding, CowStr<'static>), +pub struct Encoding { + pub id: EncodingId, + pub schema: Option, } -impl Encoding { - pub fn new(prefix: u8, suffix: IntoCowStr) -> ZResult - where - IntoCowStr: Into> + AsRef, - { - let prefix = KnownEncoding::try_from(prefix)?; - let suffix = suffix.into(); - if suffix.as_bytes().len() > u8::MAX as usize { - bail!("Suffix length is limited to 255 characters") - } - if suffix.as_ref().is_empty() { - Ok(Encoding::Exact(prefix)) - } else { - Ok(Encoding::WithSuffix(prefix, suffix.into())) - } - } - - /// Sets the suffix of this encoding. - pub fn with_suffix(self, suffix: IntoCowStr) -> ZResult - where - IntoCowStr: Into> + AsRef, - { - match self { - Encoding::Exact(e) => Encoding::new(e as u8, suffix), - Encoding::WithSuffix(e, s) => Encoding::new(e as u8, s + suffix.as_ref()), - } - } - - pub fn as_ref<'a, T>(&'a self) -> T - where - &'a Self: Into, - { - self.into() - } - - /// Returns `true`if the string representation of this encoding starts with - /// the string representation of ther given encoding. - pub fn starts_with(&self, with: T) -> bool - where - T: Into, - { - let with: Encoding = with.into(); - self.prefix() == with.prefix() && self.suffix().starts_with(with.suffix()) - } - - pub const fn prefix(&self) -> &KnownEncoding { - match self { - Encoding::Exact(e) | Encoding::WithSuffix(e, _) => e, - } - } - - pub fn suffix(&self) -> &str { - match self { - Encoding::Exact(_) => "", - Encoding::WithSuffix(_, s) => s.as_ref(), - } - } +/// # Encoding field +/// +/// ```text +/// 7 6 5 4 3 2 1 0 +/// +-+-+-+-+-+-+-+-+ +/// ~ id: z16 |S~ +/// +---------------+ +/// ~schema: ~ -- if S==1 +/// +---------------+ +/// ``` +pub mod flag { + pub const S: u32 = 1; // 0x01 Suffix if S==1 then suffix is present } impl Encoding { - pub const EMPTY: Encoding = Encoding::Exact(KnownEncoding::Empty); - pub const APP_OCTET_STREAM: Encoding = Encoding::Exact(KnownEncoding::AppOctetStream); - pub const APP_CUSTOM: Encoding = Encoding::Exact(KnownEncoding::AppCustom); - pub const TEXT_PLAIN: Encoding = Encoding::Exact(KnownEncoding::TextPlain); - pub const APP_PROPERTIES: Encoding = Encoding::Exact(KnownEncoding::AppProperties); - pub const APP_JSON: Encoding = Encoding::Exact(KnownEncoding::AppJson); - pub const APP_SQL: Encoding = Encoding::Exact(KnownEncoding::AppSql); - pub const APP_INTEGER: Encoding = Encoding::Exact(KnownEncoding::AppInteger); - pub const APP_FLOAT: Encoding = Encoding::Exact(KnownEncoding::AppFloat); - pub const APP_XML: Encoding = Encoding::Exact(KnownEncoding::AppXml); - pub const APP_XHTML_XML: Encoding = Encoding::Exact(KnownEncoding::AppXhtmlXml); - pub const APP_XWWW_FORM_URLENCODED: Encoding = - Encoding::Exact(KnownEncoding::AppXWwwFormUrlencoded); - pub const TEXT_JSON: Encoding = Encoding::Exact(KnownEncoding::TextJson); - pub const TEXT_HTML: Encoding = Encoding::Exact(KnownEncoding::TextHtml); - pub const TEXT_XML: Encoding = Encoding::Exact(KnownEncoding::TextXml); - pub const TEXT_CSS: Encoding = Encoding::Exact(KnownEncoding::TextCss); - pub const TEXT_CSV: Encoding = Encoding::Exact(KnownEncoding::TextCsv); - pub const TEXT_JAVASCRIPT: Encoding = Encoding::Exact(KnownEncoding::TextJavascript); - pub const IMAGE_JPEG: Encoding = Encoding::Exact(KnownEncoding::ImageJpeg); - pub const IMAGE_PNG: Encoding = Encoding::Exact(KnownEncoding::ImagePng); - pub const IMAGE_GIF: Encoding = Encoding::Exact(KnownEncoding::ImageGif); -} - -impl 
fmt::Display for Encoding { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Encoding::Exact(e) => f.write_str(e.as_ref()), - Encoding::WithSuffix(e, s) => { - f.write_str(e.as_ref())?; - f.write_str(s) - } - } - } -} - -impl From<&'static str> for Encoding { - fn from(s: &'static str) -> Self { - for (i, v) in consts::MIMES.iter().enumerate().skip(1) { - if let Some(suffix) = s.strip_prefix(v) { - if suffix.is_empty() { - return Encoding::Exact(unsafe { mem::transmute(i as u8) }); - } else { - return Encoding::WithSuffix(unsafe { mem::transmute(i as u8) }, suffix.into()); - } - } - } - if s.is_empty() { - Encoding::Exact(KnownEncoding::Empty) - } else { - Encoding::WithSuffix(KnownEncoding::Empty, s.into()) - } - } -} - -impl From for Encoding { - fn from(mut s: String) -> Self { - for (i, v) in consts::MIMES.iter().enumerate().skip(1) { - if s.starts_with(v) { - s.replace_range(..v.len(), ""); - if s.is_empty() { - return Encoding::Exact(unsafe { mem::transmute(i as u8) }); - } else { - return Encoding::WithSuffix(unsafe { mem::transmute(i as u8) }, s.into()); - } - } - } - if s.is_empty() { - Encoding::Exact(KnownEncoding::Empty) - } else { - Encoding::WithSuffix(KnownEncoding::Empty, s.into()) + /// Returns a new [`Encoding`] object with default empty prefix ID. + pub const fn empty() -> Self { + Self { + id: 0, + schema: None, } } } -impl From<&KnownEncoding> for Encoding { - fn from(e: &KnownEncoding) -> Encoding { - Encoding::Exact(*e) - } -} - -impl From for Encoding { - fn from(e: KnownEncoding) -> Encoding { - Encoding::Exact(e) - } -} - impl Default for Encoding { fn default() -> Self { - KnownEncoding::Empty.into() + Self::empty() } } impl Encoding { - pub const DEFAULT: Self = Self::EMPTY; - #[cfg(feature = "test")] pub fn rand() -> Self { - use rand::{ - distributions::{Alphanumeric, DistString}, - Rng, - }; + use rand::Rng; const MIN: usize = 2; const MAX: usize = 16; let mut rng = rand::thread_rng(); - let prefix: u8 = rng.gen_range(0..20); - let suffix: String = if rng.gen_bool(0.5) { - let len = rng.gen_range(MIN..MAX); - Alphanumeric.sample_string(&mut rng, len) - } else { - String::new() - }; - Encoding::new(prefix, suffix).unwrap() + let id: EncodingId = rng.gen(); + let schema = rng + .gen_bool(0.5) + .then_some(ZSlice::rand(rng.gen_range(MIN..MAX))); + Encoding { id, schema } } } diff --git a/commons/zenoh-protocol/src/core/mod.rs b/commons/zenoh-protocol/src/core/mod.rs index 3e9315bec2..82658db2fd 100644 --- a/commons/zenoh-protocol/src/core/mod.rs +++ b/commons/zenoh-protocol/src/core/mod.rs @@ -41,8 +41,8 @@ pub use wire_expr::*; mod cowstr; pub use cowstr::CowStr; -mod encoding; -pub use encoding::{Encoding, KnownEncoding}; +pub mod encoding; +pub use encoding::{Encoding, EncodingId}; pub mod locator; pub use locator::*; diff --git a/commons/zenoh-protocol/src/zenoh/mod.rs b/commons/zenoh-protocol/src/zenoh/mod.rs index d73d8cdd06..4c8458885b 100644 --- a/commons/zenoh-protocol/src/zenoh/mod.rs +++ b/commons/zenoh-protocol/src/zenoh/mod.rs @@ -209,12 +209,14 @@ pub mod ext { } } + /// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// ~ encoding ~ /// +---------------+ - /// ~ pl: [u8;z32] ~ -- Payload + /// ~ pl: ~ -- Payload /// +---------------+ + /// ``` #[derive(Debug, Clone, PartialEq, Eq)] pub struct ValueType { #[cfg(feature = "shared-memory")] diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 57c36c2e62..0fff95c250 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -12,7 +12,6 
@@ // ZettaScale Zenoh Team, // use clap::Parser; -use std::convert::TryFrom; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; @@ -40,12 +39,24 @@ async fn main() { .unwrap(); while let Ok(reply) = replies.recv_async().await { match reply.sample { - Ok(sample) => println!( - ">> Received ('{}': '{}')", - sample.key_expr.as_str(), - sample.value, - ), - Err(err) => println!(">> Received (ERROR: '{}')", String::try_from(&err).unwrap()), + Ok(sample) => { + let payload = sample + .payload + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)); + println!( + ">> Received ('{}': '{}')", + sample.key_expr.as_str(), + payload, + ); + } + Err(err) => { + let payload = err + .payload + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)); + println!(">> Received (ERROR: '{}')", payload); + } } } } diff --git a/examples/examples/z_get_liveliness.rs b/examples/examples/z_get_liveliness.rs index e0aaf8cd23..036dc0ab98 100644 --- a/examples/examples/z_get_liveliness.rs +++ b/examples/examples/z_get_liveliness.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use std::convert::TryFrom; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; @@ -39,7 +38,13 @@ async fn main() { while let Ok(reply) = replies.recv_async().await { match reply.sample { Ok(sample) => println!(">> Alive token ('{}')", sample.key_expr.as_str(),), - Err(err) => println!(">> Received (ERROR: '{}')", String::try_from(&err).unwrap()), + Err(err) => { + let payload = err + .payload + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)); + println!(">> Received (ERROR: '{}')", payload); + } } } } diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index fe5ed4d46b..cb6fecd81a 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -38,7 +38,7 @@ fn main() { .res() .unwrap(); - let data: Value = (0usize..size) + let data: Payload = (0usize..size) .map(|i| (i % 10) as u8) .collect::>() .into(); diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index f057075434..1f06c7abb9 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -41,7 +41,7 @@ fn main() { let _sub = session .declare_subscriber(key_expr_ping) - .callback(move |sample| publisher.put(sample.value).res().unwrap()) + .callback(move |sample| publisher.put(sample.payload).res().unwrap()) .res() .unwrap(); for _ in stdin().bytes().take_while(|b| !matches!(b, Ok(b'q'))) {} diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index b698cbc80b..7a3e90f627 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -30,7 +30,7 @@ fn main() { let payload_size = args.payload_size; - let data: Value = (0..payload_size) + let data: Payload = (0..payload_size) .map(|i| (i % 10) as u8) .collect::>() .into(); diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index 812c47294e..ed2a90f1a6 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -44,11 +44,15 @@ async fn main() { // Define the future to handle incoming samples of the subscription. 
let subs = async { while let Ok(sample) = subscriber.recv_async().await { + let payload = sample + .payload + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)); println!( ">> [Subscriber] Received {} ('{}': '{}')", sample.kind, sample.key_expr.as_str(), - sample.value, + payload, ); } }; diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index 54b9858cf0..d7376835b7 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -49,7 +49,10 @@ async fn main() { let query = query.unwrap(); match query.value() { None => println!(">> [Queryable ] Received Query '{}'", query.selector()), - Some(value) => println!(">> [Queryable ] Received Query '{}' with value '{}'", query.selector(), value), + Some(value) => { + let payload = value.payload.deserialize::().unwrap_or_else(|e| format!("{}", e)); + println!(">> [Queryable ] Received Query '{}' with value '{}'", query.selector(), payload); + }, } let reply = if send_errors.swap(false, Relaxed) { println!( diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index 79164c914a..5e0eaabd44 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -53,8 +53,8 @@ async fn main() { select!( sample = subscriber.recv_async() => { let sample = sample.unwrap(); - println!(">> [Subscriber] Received {} ('{}': '{}')", - sample.kind, sample.key_expr.as_str(), sample.value); + let payload = sample.payload.deserialize::().unwrap_or_else(|e| format!("{}", e)); + println!(">> [Subscriber] Received {} ('{}': '{}')", sample.kind, sample.key_expr.as_str(),payload); if sample.kind == SampleKind::Delete { stored.remove(&sample.key_expr.to_string()); } else { diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index 0542f85870..195e2f7640 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -46,10 +46,9 @@ async fn main() { select!( sample = subscriber.recv_async() => { let sample = sample.unwrap(); - println!(">> [Subscriber] Received {} ('{}': '{}')", - sample.kind, sample.key_expr.as_str(), sample.value); + let payload = sample.payload.deserialize::().unwrap_or_else(|e| format!("{}", e)); + println!(">> [Subscriber] Received {} ('{}': '{}')", sample.kind, sample.key_expr.as_str(), payload); }, - _ = stdin.read_exact(&mut input).fuse() => { match input[0] { b'q' => break, diff --git a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index a6aad76f7b..e923a7e1af 100644 --- a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -460,7 +460,7 @@ impl RBatch { let mut into = (buff)(); let n = lz4_flex::block::decompress_into(payload, into.as_mut_slice()) .map_err(|_| zerror!("Decompression error"))?; - let zslice = ZSlice::make(Arc::new(into), 0, n) + let zslice = ZSlice::new(Arc::new(into), 0, n) .map_err(|_| zerror!("Invalid decompression buffer length"))?; Ok(zslice) } @@ -579,7 +579,7 @@ mod tests { ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/io/zenoh-transport/src/common/pipeline.rs b/io/zenoh-transport/src/common/pipeline.rs index eebf23abc9..3968eabdf5 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ -754,7 +754,7 @@ mod tests { ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - 
encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -884,7 +884,7 @@ mod tests { ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -996,7 +996,7 @@ mod tests { ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/io/zenoh-transport/src/multicast/link.rs b/io/zenoh-transport/src/multicast/link.rs index b24c077c57..0172902935 100644 --- a/io/zenoh-transport/src/multicast/link.rs +++ b/io/zenoh-transport/src/multicast/link.rs @@ -216,7 +216,7 @@ impl TransportLinkMulticastRx { let mut into = (buff)(); let (n, locator) = self.inner.link.read(into.as_mut_slice()).await?; - let buffer = ZSlice::make(Arc::new(into), 0, n).map_err(|_| zerror!("Error"))?; + let buffer = ZSlice::new(Arc::new(into), 0, n).map_err(|_| zerror!("Error"))?; let mut batch = RBatch::new(self.inner.config.batch, buffer); batch.initialize(buff).map_err(|_| zerror!("{ERR}{self}"))?; Ok((batch, locator.into_owned())) diff --git a/io/zenoh-transport/src/unicast/link.rs b/io/zenoh-transport/src/unicast/link.rs index bd756d6396..daa6c3e5a5 100644 --- a/io/zenoh-transport/src/unicast/link.rs +++ b/io/zenoh-transport/src/unicast/link.rs @@ -232,7 +232,7 @@ impl TransportLinkUnicastRx { // log::trace!("RBytes: {:02x?}", &into.as_slice()[0..end]); - let buffer = ZSlice::make(Arc::new(into), 0, end) + let buffer = ZSlice::new(Arc::new(into), 0, end) .map_err(|_| zerror!("{ERR}{self}. ZSlice index(es) out of bounds"))?; let mut batch = RBatch::new(self.batch, buffer); batch diff --git a/io/zenoh-transport/src/unicast/lowlatency/link.rs b/io/zenoh-transport/src/unicast/lowlatency/link.rs index 6a382f5960..3c290ac89e 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/link.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/link.rs @@ -239,7 +239,7 @@ async fn rx_task_stream( transport.stats.inc_rx_bytes(2 + bytes); // Account for the batch len encoding (16 bits) // Deserialize all the messages from the current ZBuf - let zslice = ZSlice::make(Arc::new(buffer), 0, bytes).unwrap(); + let zslice = ZSlice::new(Arc::new(buffer), 0, bytes).unwrap(); transport.read_messages(zslice, &link.link).await?; } } @@ -274,7 +274,7 @@ async fn rx_task_dgram( transport.stats.inc_rx_bytes(bytes); // Deserialize all the messages from the current ZBuf - let zslice = ZSlice::make(Arc::new(buffer), 0, bytes).unwrap(); + let zslice = ZSlice::new(Arc::new(buffer), 0, bytes).unwrap(); transport.read_messages(zslice, &link.link).await?; } } diff --git a/io/zenoh-transport/tests/multicast_compression.rs b/io/zenoh-transport/tests/multicast_compression.rs index 4d1196e10f..5301b967f6 100644 --- a/io/zenoh-transport/tests/multicast_compression.rs +++ b/io/zenoh-transport/tests/multicast_compression.rs @@ -273,7 +273,7 @@ mod tests { payload: Put { payload: vec![0u8; msg_size].into(), timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/io/zenoh-transport/tests/multicast_transport.rs b/io/zenoh-transport/tests/multicast_transport.rs index fe5a44b7ee..69c1decd83 100644 --- a/io/zenoh-transport/tests/multicast_transport.rs +++ 
b/io/zenoh-transport/tests/multicast_transport.rs @@ -269,7 +269,7 @@ mod tests { payload: Put { payload: vec![0u8; msg_size].into(), timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/io/zenoh-transport/tests/unicast_compression.rs b/io/zenoh-transport/tests/unicast_compression.rs index dd4f55b5f5..a9c10e1a9e 100644 --- a/io/zenoh-transport/tests/unicast_compression.rs +++ b/io/zenoh-transport/tests/unicast_compression.rs @@ -301,7 +301,7 @@ mod tests { payload: Put { payload: vec![0u8; msg_size].into(), timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/io/zenoh-transport/tests/unicast_concurrent.rs b/io/zenoh-transport/tests/unicast_concurrent.rs index 4e90432193..b14cebaaf9 100644 --- a/io/zenoh-transport/tests/unicast_concurrent.rs +++ b/io/zenoh-transport/tests/unicast_concurrent.rs @@ -200,7 +200,7 @@ async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec, endpoint02: Vec, client_transport: TransportUn payload: Put { payload: vec![0u8; *ms].into(), timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/io/zenoh-transport/tests/unicast_shm.rs b/io/zenoh-transport/tests/unicast_shm.rs index d12a9db7dc..5ec7e31aba 100644 --- a/io/zenoh-transport/tests/unicast_shm.rs +++ b/io/zenoh-transport/tests/unicast_shm.rs @@ -277,7 +277,7 @@ mod tests { payload: Put { payload: sbuf.into(), timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, ext_shm: None, ext_attachment: None, @@ -325,7 +325,7 @@ mod tests { payload: Put { payload: sbuf.into(), timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, ext_shm: None, ext_attachment: None, diff --git a/io/zenoh-transport/tests/unicast_simultaneous.rs b/io/zenoh-transport/tests/unicast_simultaneous.rs index db73e99480..d465497556 100644 --- a/io/zenoh-transport/tests/unicast_simultaneous.rs +++ b/io/zenoh-transport/tests/unicast_simultaneous.rs @@ -82,7 +82,7 @@ mod tests { payload: Put { payload: vec![0u8; MSG_SIZE].into(), timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index 795ea90b41..2a830a9e2b 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -472,7 +472,7 @@ async fn test_transport( payload: Put { payload: vec![0u8; msg_size].into(), timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index c2f083827d..592a08ca9b 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -164,7 +164,8 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { // on sample received by the Subscriber sample = sub.recv_async() => { let sample = sample.unwrap(); - info!("Received data ('{}': '{}')", sample.key_expr, sample.value); + let payload = sample.payload.deserialize::().unwrap_or_else(|e| format!("{}", e)); + info!("Received data ('{}': '{}')", 
sample.key_expr, payload); stored.insert(sample.key_expr.to_string(), sample); }, // on query received by the Queryable diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index 0c6eb4357b..c5bdcc4c73 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -75,11 +75,7 @@ async fn main() { println!("Data updates are accessible through HTML5 SSE at http://:8000/{key}"); loop { - publisher - .put(Value::from(value).encoding(KnownEncoding::TextPlain.into())) - .res() - .await - .unwrap(); + publisher.put(value).res().await.unwrap(); async_std::task::sleep(Duration::from_secs(1)).await; } } diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 6f4e80f4eb..1a99d7b5a4 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -21,6 +21,7 @@ use async_std::prelude::FutureExt; use base64::{engine::general_purpose::STANDARD as b64_std_engine, Engine}; use futures::StreamExt; use http_types::Method; +use std::borrow::Cow; use std::convert::TryFrom; use std::str::FromStr; use std::sync::Arc; @@ -29,7 +30,6 @@ use tide::sse::Sender; use tide::{Request, Response, Server, StatusCode}; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; -use zenoh::properties::Properties; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; use zenoh::selector::TIME_RANGE_KEY; @@ -46,38 +46,18 @@ lazy_static::lazy_static! { } const RAW_KEY: &str = "_raw"; -fn value_to_json(value: Value) -> String { - // @TODO: transcode to JSON when implemented in Value - match &value.encoding { - p if p.starts_with(KnownEncoding::TextPlain) - || p.starts_with(KnownEncoding::AppXWwwFormUrlencoded) => - { - // convert to Json string for special characters escaping - serde_json::json!(value.to_string()).to_string() - } - p if p.starts_with(KnownEncoding::AppProperties) => { - // convert to Json string for special characters escaping - serde_json::json!(*Properties::from(value.to_string())).to_string() - } - p if p.starts_with(KnownEncoding::AppJson) - || p.starts_with(KnownEncoding::AppInteger) - || p.starts_with(KnownEncoding::AppFloat) => - { - value.to_string() - } - _ => { - format!(r#""{}""#, b64_std_engine.encode(value.payload.contiguous())) - } - } +fn payload_to_json(payload: Payload) -> String { + payload + .deserialize::() + .unwrap_or_else(|_| format!(r#""{}""#, b64_std_engine.encode(payload.contiguous()))) } fn sample_to_json(sample: Sample) -> String { - let encoding = sample.value.encoding.to_string(); format!( r#"{{ "key": "{}", "value": {}, "encoding": "{}", "time": "{}" }}"#, sample.key_expr.as_str(), - value_to_json(sample.value), - encoding, + payload_to_json(sample.payload), + sample.encoding, if let Some(ts) = sample.timestamp { ts.to_string() } else { @@ -90,11 +70,10 @@ fn result_to_json(sample: Result) -> String { match sample { Ok(sample) => sample_to_json(sample), Err(err) => { - let encoding = err.encoding.to_string(); format!( r#"{{ "key": "ERROR", "value": {}, "encoding": "{}"}}"#, - value_to_json(err), - encoding, + payload_to_json(err.payload), + err.encoding, ) } } @@ -157,12 +136,12 @@ async fn to_raw_response(results: flume::Receiver) -> Response { Ok(reply) => match reply.sample { Ok(sample) => response( StatusCode::Ok, - sample.value.encoding.to_string().as_ref(), + Cow::from(&sample.encoding).as_ref(), 
String::from_utf8_lossy(&sample.payload.contiguous()).as_ref(), ), Err(value) => response( StatusCode::Ok, - value.encoding.to_string().as_ref(), + Cow::from(&value.encoding).as_ref(), String::from_utf8_lossy(&value.payload.contiguous()).as_ref(), ), }, @@ -404,9 +383,9 @@ async fn query(mut req: Request<(Arc, String)>) -> tide::Result { @@ -441,21 +420,25 @@ async fn write(mut req: Request<(Arc, String)>) -> tide::Result { + session + .put(&key_expr, bytes) + .with_encoding(encoding) + .res() + .await + } + SampleKind::Delete => session.delete(&key_expr).res().await, + }; + match res { Ok(_) => Ok(Response::new(StatusCode::Ok)), Err(e) => Ok(response( StatusCode::InternalServerError, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 7295367a06..359b8dd7e8 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -18,6 +18,7 @@ use std::cmp::Ordering; use std::collections::{BTreeSet, HashMap, HashSet}; use std::str; use std::str::FromStr; +use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::time::Timestamp; use zenoh::Session; @@ -115,7 +116,12 @@ impl AlignQueryable { query.reply(Ok(sample)).res().await.unwrap(); } AlignData::Data(k, (v, ts)) => { - let sample = Sample::new(k, v).with_timestamp(ts); + let Value { + payload, encoding, .. + } = v; + let sample = Sample::new(k, payload) + .with_encoding(encoding) + .with_timestamp(ts); query.reply(Ok(sample)).res().await.unwrap(); } } @@ -165,7 +171,10 @@ impl AlignQueryable { let entry = entry.unwrap(); result.push(AlignData::Data( OwnedKeyExpr::from(entry.key_expr), - (entry.value, each.timestamp), + ( + Value::new(entry.payload).with_encoding(entry.encoding), + each.timestamp, + ), )); } } @@ -221,7 +230,7 @@ impl AlignQueryable { log::trace!( "[ALIGN QUERYABLE] Received ('{}': '{}')", sample.key_expr.as_str(), - sample.value + StringOrBase64::from(sample.payload.clone()) ); if let Some(timestamp) = sample.timestamp { match timestamp.cmp(&logentry.timestamp) { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 041567ae27..03c6fa949a 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -19,6 +19,7 @@ use flume::{Receiver, Sender}; use std::collections::{HashMap, HashSet}; use std::str; use zenoh::key_expr::{KeyExpr, OwnedKeyExpr}; +use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::time::Timestamp; use zenoh::Session; @@ -104,7 +105,12 @@ impl Aligner { log::trace!("[ALIGNER] Received queried samples: {missing_data:?}"); for (key, (ts, value)) in missing_data { - let sample = Sample::new(key, value).with_timestamp(ts); + let Value { + payload, encoding, .. 
+ } = value; + let sample = Sample::new(key, payload) + .with_encoding(encoding) + .with_timestamp(ts); log::debug!("[ALIGNER] Adding {:?} to storage", sample); self.tx_sample.send_async(sample).await.unwrap_or_else(|e| { log::error!("[ALIGNER] Error adding sample to storage: {}", e) @@ -136,7 +142,10 @@ impl Aligner { for sample in replies { result.insert( sample.key_expr.into(), - (sample.timestamp.unwrap(), sample.value), + ( + sample.timestamp.unwrap(), + Value::new(sample.payload).with_encoding(sample.encoding), + ), ); } (result, no_err) @@ -202,9 +211,9 @@ impl Aligner { let properties = format!("timestamp={}&{}=cold", other.timestamp, ERA); let (reply_content, mut no_err) = self.perform_query(other_rep, properties).await; let mut other_intervals: HashMap = HashMap::new(); - // expecting sample.value to be a vec of intervals with their checksum + // expecting sample.payload to be a vec of intervals with their checksum for each in reply_content { - match serde_json::from_str(&each.value.to_string()) { + match serde_json::from_str(&StringOrBase64::from(each.payload)) { Ok((i, c)) => { other_intervals.insert(i, c); } @@ -246,11 +255,11 @@ impl Aligner { INTERVALS, diff_string.join(",") ); - // expecting sample.value to be a vec of subintervals with their checksum + // expecting sample.payload to be a vec of subintervals with their checksum let (reply_content, mut no_err) = self.perform_query(other_rep, properties).await; let mut other_subintervals: HashMap = HashMap::new(); for each in reply_content { - match serde_json::from_str(&each.value.to_string()) { + match serde_json::from_str(&StringOrBase64::from(each.payload)) { Ok((i, c)) => { other_subintervals.insert(i, c); } @@ -287,11 +296,11 @@ impl Aligner { SUBINTERVALS, diff_string.join(",") ); - // expecting sample.value to be a vec of log entries with their checksum + // expecting sample.payload to be a vec of log entries with their checksum let (reply_content, mut no_err) = self.perform_query(other_rep, properties).await; let mut other_content: HashMap> = HashMap::new(); for each in reply_content { - match serde_json::from_str(&each.value.to_string()) { + match serde_json::from_str(&StringOrBase64::from(each.payload)) { Ok((i, c)) => { other_content.insert(i, c); } @@ -332,13 +341,13 @@ impl Aligner { log::trace!( "[ALIGNER] Received ('{}': '{}')", sample.key_expr.as_str(), - sample.value + StringOrBase64::from(sample.payload.clone()) ); return_val.push(sample); } Err(err) => { log::error!( - "[ALIGNER] Received error for query on selector {} :{}", + "[ALIGNER] Received error for query on selector {} :{:?}", selector, err ); diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index b743a70451..78254213f7 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -26,6 +26,7 @@ use std::str; use std::str::FromStr; use std::time::{Duration, SystemTime}; use urlencoding::encode; +use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::time::Timestamp; use zenoh::Session; @@ -226,9 +227,9 @@ impl Replica { from, sample.kind, sample.key_expr.as_str(), - sample.value + StringOrBase64::from(sample.payload.clone()) ); - let digest: Digest = match serde_json::from_str(&format!("{}", sample.value)) { + let digest: Digest = match serde_json::from_str(&StringOrBase64::from(sample.payload)) { Ok(digest) => digest, Err(e) => { log::error!("[DIGEST_SUB] Error in decoding the 
digest: {}", e); diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 115ed1e8d9..1ef7e65390 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -180,7 +180,7 @@ impl StorageService { // log error if the sample is not timestamped // This is to reduce down the line inconsistencies of having duplicate samples stored if sample.get_timestamp().is_none() { - log::error!("Sample {} is not timestamped. Please timestamp samples meant for replicated storage.", sample); + log::error!("Sample {:?} is not timestamped. Please timestamp samples meant for replicated storage.", sample); } else { self.process_sample(sample).await; @@ -262,7 +262,7 @@ impl StorageService { // The storage should only simply save the key, sample pair while put and retrieve the same during get // the trimming during PUT and GET should be handled by the plugin async fn process_sample(&self, sample: Sample) { - log::trace!("[STORAGE] Processing sample: {}", sample); + log::trace!("[STORAGE] Processing sample: {:?}", sample); // Call incoming data interceptor (if any) let sample = if let Some(ref interceptor) = self.in_interceptor { interceptor(sample) @@ -295,7 +295,7 @@ impl StorageService { && self.is_latest(&k, sample.get_timestamp().unwrap()).await)) { log::trace!( - "Sample `{}` identified as neded processing for key {}", + "Sample `{:?}` identified as neded processing for key {}", sample, k ); @@ -306,15 +306,19 @@ impl StorageService { .await { Some(overriding_update) => { - let mut sample_to_store = - Sample::new(KeyExpr::from(k.clone()), overriding_update.data.value) - .with_timestamp(overriding_update.data.timestamp); + let Value { + payload, encoding, .. + } = overriding_update.data.value; + let mut sample_to_store = Sample::new(KeyExpr::from(k.clone()), payload) + .with_encoding(encoding) + .with_timestamp(overriding_update.data.timestamp); sample_to_store.kind = overriding_update.kind; sample_to_store } None => { let mut sample_to_store = - Sample::new(KeyExpr::from(k.clone()), sample.value.clone()) + Sample::new(KeyExpr::from(k.clone()), sample.payload.clone()) + .with_encoding(sample.encoding.clone()) .with_timestamp(sample.timestamp.unwrap()); sample_to_store.kind = sample.kind; sample_to_store @@ -333,7 +337,8 @@ impl StorageService { storage .put( stripped_key, - sample_to_store.value.clone(), + Value::new(sample_to_store.payload.clone()) + .with_encoding(sample_to_store.encoding.clone()), sample_to_store.timestamp.unwrap(), ) .await @@ -397,7 +402,7 @@ impl StorageService { Update { kind: sample.kind, data: StoredData { - value: sample.value, + value: Value::new(sample.payload).with_encoding(sample.encoding), timestamp: sample.timestamp.unwrap(), }, }, @@ -515,7 +520,11 @@ impl StorageService { match storage.get(stripped_key, q.parameters()).await { Ok(stored_data) => { for entry in stored_data { - let sample = Sample::new(key.clone(), entry.value) + let Value { + payload, encoding, .. 
+ } = entry.value; + let sample = Sample::new(key.clone(), payload) + .with_encoding(encoding) .with_timestamp(entry.timestamp); // apply outgoing interceptor on results let sample = if let Some(ref interceptor) = self.out_interceptor { @@ -549,7 +558,11 @@ impl StorageService { match storage.get(stripped_key, q.parameters()).await { Ok(stored_data) => { for entry in stored_data { - let sample = Sample::new(q.key_expr().clone(), entry.value) + let Value { + payload, encoding, .. + } = entry.value; + let sample = Sample::new(q.key_expr().clone(), payload) + .with_encoding(encoding) .with_timestamp(entry.timestamp); // apply outgoing interceptor on results let sample = if let Some(ref interceptor) = self.out_interceptor { @@ -667,7 +680,7 @@ impl StorageService { self.process_sample(sample).await; } Err(e) => log::warn!( - "Storage '{}' received an error to align query: {}", + "Storage '{}' received an error to align query: {:?}", self.name, e ), @@ -688,15 +701,15 @@ fn serialize_update(update: &Update) -> String { } fn construct_update(data: String) -> Update { - let result: (String, String, String, Vec<&[u8]>) = serde_json::from_str(&data).unwrap(); + let result: (String, String, String, Vec<&[u8]>) = serde_json::from_str(&data).unwrap(); // @TODO: remove the unwrap() let mut payload = ZBuf::default(); for slice in result.3 { payload.push_zslice(slice.to_vec().into()); } - let value = Value::new(payload).encoding(Encoding::from(result.2)); + let value = Value::new(payload).with_encoding(result.2); let data = StoredData { value, - timestamp: Timestamp::from_str(&result.1).unwrap(), + timestamp: Timestamp::from_str(&result.1).unwrap(), // @TODO: remove the unwrap() }; let kind = if result.0.eq(&(SampleKind::Put).to_string()) { SampleKind::Put diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index a4293f31f1..81029e2fa7 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -20,6 +20,7 @@ use std::str::FromStr; use std::thread::sleep; use async_std::task; +use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::query::Reply; use zenoh::{prelude::Config, time::Timestamp}; @@ -100,7 +101,7 @@ async fn test_updates_in_order() { // expects exactly one sample let data = get_data(&session, "operation/test/a").await; assert_eq!(data.len(), 1); - assert_eq!(format!("{}", data[0].value), "1"); + assert_eq!(StringOrBase64::from(data[0].payload.clone()).as_str(), "1"); put_data( &session, @@ -116,7 +117,7 @@ async fn test_updates_in_order() { // expects exactly one sample let data = get_data(&session, "operation/test/b").await; assert_eq!(data.len(), 1); - assert_eq!(format!("{}", data[0].value), "2"); + assert_eq!(StringOrBase64::from(data[0].payload.clone()).as_str(), "2"); delete_data( &session, @@ -135,7 +136,7 @@ async fn test_updates_in_order() { // expects exactly one sample let data = get_data(&session, "operation/test/b").await; assert_eq!(data.len(), 1); - assert_eq!(format!("{}", data[0].value), "2"); + assert_eq!(StringOrBase64::from(data[0].payload.clone()).as_str(), "2"); assert_eq!(data[0].key_expr.as_str(), "operation/test/b"); drop(storage); diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index 60970b2247..4808ec246f 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ 
b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -21,6 +21,7 @@ use std::thread::sleep; // use std::collections::HashMap; use async_std::task; +use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::query::Reply; use zenoh::{prelude::Config, time::Timestamp}; @@ -117,7 +118,7 @@ async fn test_wild_card_in_order() { let data = get_data(&session, "wild/test/*").await; assert_eq!(data.len(), 1); assert_eq!(data[0].key_expr.as_str(), "wild/test/a"); - assert_eq!(format!("{}", data[0].value), "2"); + assert_eq!(StringOrBase64::from(data[0].payload.clone()).as_str(), "2"); put_data( &session, @@ -135,8 +136,8 @@ async fn test_wild_card_in_order() { assert_eq!(data.len(), 2); assert!(["wild/test/a", "wild/test/b"].contains(&data[0].key_expr.as_str())); assert!(["wild/test/a", "wild/test/b"].contains(&data[1].key_expr.as_str())); - assert!(["2", "3"].contains(&format!("{}", data[0].value).as_str())); - assert!(["2", "3"].contains(&format!("{}", data[1].value).as_str())); + assert!(["2", "3"].contains(&StringOrBase64::from(data[0].payload.clone()).as_str())); + assert!(["2", "3"].contains(&StringOrBase64::from(data[1].payload.clone()).as_str())); put_data( &session, @@ -154,8 +155,8 @@ async fn test_wild_card_in_order() { assert_eq!(data.len(), 2); assert!(["wild/test/a", "wild/test/b"].contains(&data[0].key_expr.as_str())); assert!(["wild/test/a", "wild/test/b"].contains(&data[1].key_expr.as_str())); - assert_eq!(format!("{}", data[0].value).as_str(), "4"); - assert_eq!(format!("{}", data[1].value).as_str(), "4"); + assert_eq!(StringOrBase64::from(data[0].payload.clone()).as_str(), "4"); + assert_eq!(StringOrBase64::from(data[1].payload.clone()).as_str(), "4"); delete_data( &session, diff --git a/zenoh-ext/Cargo.toml b/zenoh-ext/Cargo.toml index 91b0283ddb..7ee6e7213c 100644 --- a/zenoh-ext/Cargo.toml +++ b/zenoh-ext/Cargo.toml @@ -37,7 +37,10 @@ env_logger = { workspace = true } flume = { workspace = true } futures = { workspace = true } log = { workspace = true } +phf = { workspace = true } serde = { workspace = true, features = ["default"] } +serde_cbor = { workspace = true } +serde_json = { workspace = true } zenoh = { workspace = true, features = ["unstable"], default-features = false } zenoh-core = { workspace = true } zenoh-macros = { workspace = true } diff --git a/zenoh-ext/examples/z_query_sub.rs b/zenoh-ext/examples/z_query_sub.rs index 73433ebf14..80efc0854f 100644 --- a/zenoh-ext/examples/z_query_sub.rs +++ b/zenoh-ext/examples/z_query_sub.rs @@ -17,9 +17,7 @@ use clap::Command; use futures::prelude::*; use futures::select; use std::time::Duration; -use zenoh::config::Config; -use zenoh::prelude::r#async::*; -use zenoh::query::ReplyKeyExpr; +use zenoh::{config::Config, prelude::r#async::*, query::ReplyKeyExpr}; use zenoh_ext::*; #[async_std::main] @@ -62,8 +60,8 @@ async fn main() { select!( sample = subscriber.recv_async() => { let sample = sample.unwrap(); - println!(">> [Subscriber] Received {} ('{}': '{}')", - sample.kind, sample.key_expr.as_str(), sample.value); + let payload = sample.payload.deserialize::().unwrap_or_else(|e| format!("{}", e)); + println!(">> [Subscriber] Received {} ('{}': '{}')", sample.kind, sample.key_expr.as_str(), payload); }, _ = stdin.read_exact(&mut input).fuse() => { diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index aece581fde..9078e61741 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -252,7 +252,7 @@ async fn net_event_handler(z: Arc, state: Arc) { .await .unwrap(); while let 
Ok(s) = sub.recv_async().await { - match bincode::deserialize::(&(s.value.payload.contiguous())) { + match bincode::deserialize::(&(s.payload.contiguous())) { Ok(evt) => match evt { GroupNetEvent::Join(je) => { log::debug!("Member join: {:?}", &je.member); @@ -342,7 +342,7 @@ async fn net_event_handler(z: Arc, state: Arc) { } } Err(e) => { - log::warn!("Error received: {}", e); + log::warn!("Error received: {:?}", e); } } } diff --git a/zenoh-ext/src/lib.rs b/zenoh-ext/src/lib.rs index 7440d80a53..7ac880fd8c 100644 --- a/zenoh-ext/src/lib.rs +++ b/zenoh-ext/src/lib.rs @@ -23,6 +23,9 @@ pub use querying_subscriber::{ pub use session_ext::SessionExt; pub use subscriber_ext::SubscriberBuilderExt; pub use subscriber_ext::SubscriberForward; +use zenoh::query::Reply; +use zenoh::{sample::Sample, Result as ZResult}; +use zenoh_core::zerror; /// The space of keys to use in a [`FetchingSubscriber`]. pub enum KeySpace { @@ -51,3 +54,13 @@ impl From for KeySpace { KeySpace::Liveliness } } + +pub trait ExtractSample { + fn extract(self) -> ZResult; +} + +impl ExtractSample for Reply { + fn extract(self) -> ZResult { + self.sample.map_err(|e| zerror!("{:?}", e).into()) + } +} diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 4a7c4f2ded..2c89ec82ae 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -26,6 +26,8 @@ use zenoh::Result as ZResult; use zenoh::SessionRef; use zenoh_core::{zlock, AsyncResolve, Resolvable, SyncResolve}; +use crate::ExtractSample; + /// The builder of [`FetchingSubscriber`], allowing to configure it. #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] pub struct QueryingSubscriberBuilder<'a, 'b, KeySpace, Handler> { @@ -350,8 +352,7 @@ pub struct FetchingSubscriberBuilder< Fetch: FnOnce(Box) -> ZResult<()>, TryIntoSample, > where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { pub(crate) session: SessionRef<'a>, pub(crate) key_expr: ZResult>, @@ -372,8 +373,7 @@ impl< TryIntoSample, > FetchingSubscriberBuilder<'a, 'b, KeySpace, Handler, Fetch, TryIntoSample> where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { fn with_static_keys( self, @@ -399,8 +399,7 @@ impl< TryIntoSample, > FetchingSubscriberBuilder<'a, 'b, KeySpace, DefaultHandler, Fetch, TryIntoSample> where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { /// Add callback to [`FetchingSubscriber`]. #[inline] @@ -496,8 +495,7 @@ impl< TryIntoSample, > FetchingSubscriberBuilder<'a, 'b, crate::UserSpace, Handler, Fetch, TryIntoSample> where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { /// Change the subscription reliability. 
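Editorial note: the `TryInto<Sample>` bounds removed throughout these builders are replaced by the new `ExtractSample` trait added to `zenoh-ext`'s lib.rs above. A minimal sketch of a caller-side type satisfying that bound, assuming only what this patch defines (the `CachedEntry` type is hypothetical, introduced purely for illustration):

```rust
use zenoh::{sample::Sample, Result as ZResult};
use zenoh_core::zerror;
use zenoh_ext::ExtractSample;

// Hypothetical caller-side type that may or may not carry a sample.
struct CachedEntry {
    sample: Option<Sample>,
}

impl ExtractSample for CachedEntry {
    fn extract(self) -> ZResult<Sample> {
        // Mirror the `Reply` impl above: surface a zenoh error when no sample is available.
        self.sample
            .ok_or_else(|| zerror!("cached entry carried no sample").into())
    }
}
```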
#[inline] @@ -540,8 +538,7 @@ impl< where Handler: IntoCallbackReceiverPair<'static, Sample>, Handler::Receiver: Send, - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { type To = ZResult>; } @@ -556,8 +553,7 @@ where KeySpace: Into, Handler: IntoCallbackReceiverPair<'static, Sample> + Send, Handler::Receiver: Send, - TryIntoSample: TryInto + Send + Sync, - >::Error: Into, + TryIntoSample: ExtractSample + Send + Sync, { fn res_sync(self) -> ::To { FetchingSubscriber::new(self.with_static_keys()) @@ -575,8 +571,7 @@ where KeySpace: Into, Handler: IntoCallbackReceiverPair<'static, Sample> + Send, Handler::Receiver: Send, - TryIntoSample: TryInto + Send + Sync, - >::Error: Into, + TryIntoSample: ExtractSample + Send + Sync, { type Future = Ready; @@ -649,8 +644,7 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { where KeySpace: Into, Handler: IntoCallbackReceiverPair<'static, Sample, Receiver = Receiver> + Send, - TryIntoSample: TryInto + Send + Sync, - >::Error: Into, + TryIntoSample: ExtractSample + Send + Sync, { let state = Arc::new(Mutex::new(InnerState { pending_fetches: 0, @@ -769,8 +763,7 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { fetch: Fetch, ) -> impl Resolve> where - TryIntoSample: TryInto + Send + Sync, - >::Error: Into, + TryIntoSample: ExtractSample + Send + Sync, { FetchBuilder { fetch, @@ -846,8 +839,7 @@ pub struct FetchBuilder< Fetch: FnOnce(Box) -> ZResult<()>, TryIntoSample, > where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { fetch: Fetch, phantom: std::marker::PhantomData, @@ -858,8 +850,7 @@ pub struct FetchBuilder< impl) -> ZResult<()>, TryIntoSample> Resolvable for FetchBuilder where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { type To = ZResult<()>; } @@ -867,8 +858,7 @@ where impl) -> ZResult<()>, TryIntoSample> SyncResolve for FetchBuilder where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { fn res_sync(self) -> ::To { let handler = register_handler(self.state, self.callback); @@ -879,8 +869,7 @@ where impl) -> ZResult<()>, TryIntoSample> AsyncResolve for FetchBuilder where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { type Future = Ready; @@ -906,16 +895,15 @@ fn run_fetch< handler: RepliesHandler, ) -> ZResult<()> where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { log::debug!("Fetch data for FetchingSubscriber"); - (fetch)(Box::new(move |s: TryIntoSample| match s.try_into() { + (fetch)(Box::new(move |s: TryIntoSample| match s.extract() { Ok(s) => { let mut state = zlock!(handler.state); log::trace!("Fetched sample received: push it to merge_queue"); state.merge_queue.push(s); } - Err(e) => log::debug!("Received error fetching data: {}", e.into()), + Err(e) => log::debug!("Received error fetching data: {}", e), })) } diff --git a/zenoh-ext/src/subscriber_ext.rs b/zenoh-ext/src/subscriber_ext.rs index 83de47779c..89d3b5f691 100644 --- a/zenoh-ext/src/subscriber_ext.rs +++ b/zenoh-ext/src/subscriber_ext.rs @@ -13,7 +13,7 @@ // use flume::r#async::RecvStream; use futures::stream::{Forward, Map}; -use std::{convert::TryInto, time::Duration}; +use std::time::Duration; use zenoh::query::ReplyKeyExpr; use zenoh::sample::Locality; use zenoh::Result as ZResult; @@ -24,6 +24,7 @@ use zenoh::{ subscriber::{PushMode, Reliability, Subscriber, SubscriberBuilder}, }; +use crate::ExtractSample; use crate::{querying_subscriber::QueryingSubscriberBuilder, 
FetchingSubscriberBuilder}; /// Allows writing `subscriber.forward(receiver)` instead of `subscriber.stream().map(Ok).forward(publisher)` @@ -87,8 +88,7 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { fetch: Fetch, ) -> FetchingSubscriberBuilder<'a, 'b, Self::KeySpace, Handler, Fetch, TryIntoSample> where - TryIntoSample: TryInto, - >::Error: Into; + TryIntoSample: ExtractSample; /// Create a [`FetchingSubscriber`](super::FetchingSubscriber) that will perform a query (`session.get()`) as it's /// initial fetch. @@ -169,8 +169,7 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> fetch: Fetch, ) -> FetchingSubscriberBuilder<'a, 'b, Self::KeySpace, Handler, Fetch, TryIntoSample> where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { FetchingSubscriberBuilder { session: self.session, @@ -283,8 +282,7 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> fetch: Fetch, ) -> FetchingSubscriberBuilder<'a, 'b, Self::KeySpace, Handler, Fetch, TryIntoSample> where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { FetchingSubscriberBuilder { session: self.session, diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 11ecfad1bf..e6f7a4d9aa 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -79,10 +79,14 @@ log = { workspace = true } ordered-float = { workspace = true } paste = { workspace = true } petgraph = { workspace = true } +phf = { workspace = true } rand = { workspace = true, features = ["default"] } regex = { workspace = true } serde = { workspace = true, features = ["default"] } +serde_cbor = { workspace = true } serde_json = { workspace = true } +serde-pickle = { workspace = true } +serde_yaml = { workspace = true } socket2 = { workspace = true } stop-token = { workspace = true } uhlc = { workspace = true, features = ["default"] } diff --git a/zenoh/src/admin.rs b/zenoh/src/admin.rs index 8cdf638af5..5a242d51b7 100644 --- a/zenoh/src/admin.rs +++ b/zenoh/src/admin.rs @@ -12,11 +12,12 @@ // ZettaScale Zenoh Team, // use crate::{ + encoding::Encoding, keyexpr, prelude::sync::{KeyExpr, Locality, SampleKind}, queryable::Query, sample::DataInfo, - Sample, Session, ZResult, + Payload, Sample, Session, ZResult, }; use async_std::task; use std::{ @@ -25,10 +26,7 @@ use std::{ sync::Arc, }; use zenoh_core::SyncResolve; -use zenoh_protocol::{ - core::{Encoding, KnownEncoding, WireExpr}, - network::NetworkMessage, -}; +use zenoh_protocol::{core::WireExpr, network::NetworkMessage}; use zenoh_transport::{ TransportEventHandler, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; @@ -71,7 +69,12 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { let key_expr = *KE_PREFIX / own_zid / *KE_TRANSPORT_UNICAST / zid; if query.key_expr().intersects(&key_expr) { if let Ok(value) = serde_json::value::to_value(peer.clone()) { - let _ = query.reply(Ok(Sample::new(key_expr, value))).res_sync(); + match Payload::try_from(value) { + Ok(zbuf) => { + let _ = query.reply(Ok(Sample::new(key_expr, zbuf))).res_sync(); + } + Err(e) => log::debug!("Admin query error: {}", e), + } } } @@ -83,7 +86,12 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { *KE_PREFIX / own_zid / *KE_TRANSPORT_UNICAST / zid / *KE_LINK / lid; if query.key_expr().intersects(&key_expr) { if let Ok(value) = serde_json::value::to_value(link) { - let _ = query.reply(Ok(Sample::new(key_expr, value))).res_sync(); + match Payload::try_from(value) { + Ok(zbuf) => { + let _ = query.reply(Ok(Sample::new(key_expr, 
zbuf))).res_sync(); + } + Err(e) => log::debug!("Admin query error: {}", e), + } } } } @@ -145,7 +153,7 @@ impl TransportMulticastEventHandler for Handler { let expr = WireExpr::from(&(*KE_PREFIX / own_zid / *KE_TRANSPORT_UNICAST / zid)) .to_owned(); let info = DataInfo { - encoding: Some(Encoding::Exact(KnownEncoding::AppJson)), + encoding: Some(Encoding::APPLICATION_JSON), ..Default::default() }; self.session.handle_data( @@ -191,7 +199,7 @@ impl TransportPeerEventHandler for PeerHandler { let mut s = DefaultHasher::new(); link.hash(&mut s); let info = DataInfo { - encoding: Some(Encoding::Exact(KnownEncoding::AppJson)), + encoding: Some(Encoding::APPLICATION_JSON), ..Default::default() }; self.session.handle_data( diff --git a/zenoh/src/encoding.rs b/zenoh/src/encoding.rs new file mode 100644 index 0000000000..d9fa725ed5 --- /dev/null +++ b/zenoh/src/encoding.rs @@ -0,0 +1,850 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::payload::Payload; +use phf::phf_map; +use std::{borrow::Cow, convert::Infallible, fmt, str::FromStr}; +use zenoh_buffers::{ZBuf, ZSlice}; +use zenoh_protocol::core::EncodingId; +#[cfg(feature = "shared-memory")] +use ::{std::sync::Arc, zenoh_shm::SharedMemoryBuf}; + +/// Default encoding values used by Zenoh. +/// +/// An encoding has a similar role to Content-type in HTTP: it indicates, when present, how data should be interpreted by the application. +/// +/// Please note the Zenoh protocol does not impose any encoding value nor it operates on it. +/// It can be seen as some optional metadata that is carried over by Zenoh in such a way the application may perform different operations depending on the encoding value. +/// +/// A set of associated constants are provided to cover the most common encodings for user convenience. +/// This is parcticular useful in helping Zenoh to perform additional network optimizations. +/// +/// # Examples +/// +/// ### String operations +/// +/// Create an [`Encoding`] from a string and viceversa. +/// ``` +/// use zenoh::prelude::Encoding; +/// +/// let encoding: Encoding = "text/plain".into(); +/// let text: String = encoding.clone().into(); +/// assert_eq!("text/plain", &text); +/// ``` +/// +/// ### Constants and cow operations +/// +/// Since some encoding values are internally optimized by Zenoh, it's generally more efficient to use +/// the defined constants and [`Cow`][std::borrow::Cow] conversion to obtain its string representation. +/// ``` +/// use zenoh::prelude::Encoding; +/// use std::borrow::Cow; +/// +/// // This allocates +/// assert_eq!("text/plain", &String::from(Encoding::TEXT_PLAIN)); +/// // This does NOT allocate +/// assert_eq!("text/plain", &Cow::from(Encoding::TEXT_PLAIN)); +/// ``` +/// +/// ### Schema +/// +/// Additionally, a schema can be associated to the encoding. +/// The convetions is to use the `;` separator if an encoding is created from a string. +/// Alternatively, [`with_schema()`](Encoding::with_schema) can be used to add a schme to one of the associated constants. 
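Editorial note: the string conversions described above are backed by the `STR_TO_ID`/`ID_TO_STR` maps defined later in this file. A minimal sanity-check sketch of that round trip, before the schema example that follows:

```rust
use zenoh::prelude::Encoding;

fn main() {
    // Well-known strings resolve to the corresponding constant...
    assert_eq!(Encoding::from("application/json"), Encoding::APPLICATION_JSON);
    // ...and constants render back to the same string.
    assert_eq!(Encoding::APPLICATION_JSON.to_string(), "application/json");
}
```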
+/// ``` +/// use zenoh::prelude::Encoding; +/// +/// let encoding1 = Encoding::from("text/plain;utf-8"); +/// let encoding2 = Encoding::TEXT_PLAIN.with_schema("utf-8"); +/// assert_eq!(encoding1, encoding2); +/// assert_eq!("text/plain;utf-8", &encoding1.to_string()); +/// assert_eq!("text/plain;utf-8", &encoding2.to_string()); +/// ``` +#[repr(transparent)] +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct Encoding(zenoh_protocol::core::Encoding); + +impl Encoding { + const SCHEMA_SEP: char = ';'; + + // For compatibility purposes Zenoh reserves any prefix value from `0` to `1023` included. + + // - Primitives types supported in all Zenoh bindings + /// Just some bytes. + /// + /// Constant alias for string: `"zenoh/bytes"`. + pub const ZENOH_BYTES: Encoding = Self(zenoh_protocol::core::Encoding { + id: 0, + schema: None, + }); + /// A VLE-encoded signed little-endian integer. Either 8bit, 16bit, 32bit, or 64bit. Binary reprensentation uses two's complement. + /// + /// Constant alias for string: `"zenoh/int"`. + pub const ZENOH_INT: Encoding = Self(zenoh_protocol::core::Encoding { + id: 1, + schema: None, + }); + /// A VLE-encoded little-endian unsigned integer. Either 8bit, 16bit, 32bit, or 64bit. + /// + /// Constant alias for string: `"zenoh/uint"`. + pub const ZENOH_UINT: Encoding = Self(zenoh_protocol::core::Encoding { + id: 2, + schema: None, + }); + /// A VLE-encoded float. Either little-endian 32bit or 64bit. Binary representation uses *IEEE 754-2008* *binary32* or *binary64*, respectively. + /// + /// Constant alias for string: `"zenoh/float"`. + pub const ZENOH_FLOAT: Encoding = Self(zenoh_protocol::core::Encoding { + id: 3, + schema: None, + }); + /// A boolean. `0` is `false`, `1` is `true`. Other values are invalid. + /// + /// Constant alias for string: `"zenoh/bool"`. + pub const ZENOH_BOOL: Encoding = Self(zenoh_protocol::core::Encoding { + id: 4, + schema: None, + }); + /// A UTF-8 string. + /// + /// Constant alias for string: `"zenoh/string"`. + pub const ZENOH_STRING: Encoding = Self(zenoh_protocol::core::Encoding { + id: 5, + schema: None, + }); + /// A zenoh error. + /// + /// Constant alias for string: `"zenoh/error"`. + pub const ZENOH_ERROR: Encoding = Self(zenoh_protocol::core::Encoding { + id: 6, + schema: None, + }); + + // - Advanced types may be supported in some of the Zenoh bindings. + /// An application-specific stream of bytes. + /// + /// Constant alias for string: `"application/octet-stream"`. + pub const APPLICATION_OCTET_STREAM: Encoding = Self(zenoh_protocol::core::Encoding { + id: 7, + schema: None, + }); + /// A textual file. + /// + /// Constant alias for string: `"text/plain"`. + pub const TEXT_PLAIN: Encoding = Self(zenoh_protocol::core::Encoding { + id: 8, + schema: None, + }); + /// JSON data intended to be consumed by an application. + /// + /// Constant alias for string: `"application/json"`. + pub const APPLICATION_JSON: Encoding = Self(zenoh_protocol::core::Encoding { + id: 9, + schema: None, + }); + /// JSON data intended to be human readable. + /// + /// Constant alias for string: `"text/json"`. + pub const TEXT_JSON: Encoding = Self(zenoh_protocol::core::Encoding { + id: 10, + schema: None, + }); + /// A Common Data Representation (CDR)-encoded data. + /// + /// Constant alias for string: `"application/cdr"`. + pub const APPLICATION_CDR: Encoding = Self(zenoh_protocol::core::Encoding { + id: 11, + schema: None, + }); + /// A Concise Binary Object Representation (CBOR)-encoded data. 
+ /// + /// Constant alias for string: `"application/cbor"`. + pub const APPLICATION_CBOR: Encoding = Self(zenoh_protocol::core::Encoding { + id: 12, + schema: None, + }); + /// YAML data intended to be consumed by an application. + /// + /// Constant alias for string: `"application/yaml"`. + pub const APPLICATION_YAML: Encoding = Self(zenoh_protocol::core::Encoding { + id: 13, + schema: None, + }); + /// YAML data intended to be human readable. + /// + /// Constant alias for string: `"text/yaml"`. + pub const TEXT_YAML: Encoding = Self(zenoh_protocol::core::Encoding { + id: 14, + schema: None, + }); + /// JSON5 encoded data that are human readable. + /// + /// Constant alias for string: `"text/json5"`. + pub const TEXT_JSON5: Encoding = Self(zenoh_protocol::core::Encoding { + id: 15, + schema: None, + }); + /// A Python object serialized using [pickle](https://docs.python.org/3/library/pickle.html). + /// + /// Constant alias for string: `"application/python-serialized-object"`. + pub const APPLICATION_PYTHON_SERIALIZED_OBJECT: Encoding = + Self(zenoh_protocol::core::Encoding { + id: 16, + schema: None, + }); + /// An application-specific protobuf-encoded data. + /// + /// Constant alias for string: `"application/protobuf"`. + pub const APPLICATION_PROTOBUF: Encoding = Self(zenoh_protocol::core::Encoding { + id: 17, + schema: None, + }); + /// A Java serialized object. + /// + /// Constant alias for string: `"application/java-serialized-object"`. + pub const APPLICATION_JAVA_SERIALIZED_OBJECT: Encoding = Self(zenoh_protocol::core::Encoding { + id: 18, + schema: None, + }); + /// An [openmetrics](https://github.com/OpenObservability/OpenMetrics) data, common used by [Prometheus](https://prometheus.io/). + /// + /// Constant alias for string: `"application/openmetrics-text"`. + pub const APPLICATION_OPENMETRICS_TEXT: Encoding = Self(zenoh_protocol::core::Encoding { + id: 19, + schema: None, + }); + /// A Portable Network Graphics (PNG) image. + /// + /// Constant alias for string: `"image/png"`. + pub const IMAGE_PNG: Encoding = Self(zenoh_protocol::core::Encoding { + id: 20, + schema: None, + }); + /// A Joint Photographic Experts Group (JPEG) image. + /// + /// Constant alias for string: `"image/jpeg"`. + pub const IMAGE_JPEG: Encoding = Self(zenoh_protocol::core::Encoding { + id: 21, + schema: None, + }); + /// A Graphics Interchange Format (GIF) image. + /// + /// Constant alias for string: `"image/gif"`. + pub const IMAGE_GIF: Encoding = Self(zenoh_protocol::core::Encoding { + id: 22, + schema: None, + }); + /// A BitMap (BMP) image. + /// + /// Constant alias for string: `"image/bmp"`. + pub const IMAGE_BMP: Encoding = Self(zenoh_protocol::core::Encoding { + id: 23, + schema: None, + }); + /// A Web Protable (WebP) image. + /// + /// Constant alias for string: `"image/webp"`. + pub const IMAGE_WEBP: Encoding = Self(zenoh_protocol::core::Encoding { + id: 24, + schema: None, + }); + /// An XML file intended to be consumed by an application.. + /// + /// Constant alias for string: `"application/xml"`. + pub const APPLICATION_XML: Encoding = Self(zenoh_protocol::core::Encoding { + id: 25, + schema: None, + }); + /// An encoded a list of tuples, each consisting of a name and a value. + /// + /// Constant alias for string: `"application/x-www-form-urlencoded"`. + pub const APPLICATION_X_WWW_FORM_URLENCODED: Encoding = Self(zenoh_protocol::core::Encoding { + id: 26, + schema: None, + }); + /// An HTML file. + /// + /// Constant alias for string: `"text/html"`. 
+ pub const TEXT_HTML: Encoding = Self(zenoh_protocol::core::Encoding { + id: 27, + schema: None, + }); + /// An XML file that is human readable. + /// + /// Constant alias for string: `"text/xml"`. + pub const TEXT_XML: Encoding = Self(zenoh_protocol::core::Encoding { + id: 28, + schema: None, + }); + /// A CSS file. + /// + /// Constant alias for string: `"text/css"`. + pub const TEXT_CSS: Encoding = Self(zenoh_protocol::core::Encoding { + id: 29, + schema: None, + }); + /// A JavaScript file. + /// + /// Constant alias for string: `"text/javascript"`. + pub const TEXT_JAVASCRIPT: Encoding = Self(zenoh_protocol::core::Encoding { + id: 30, + schema: None, + }); + /// A MarkDown file. + /// + /// Constant alias for string: `"text/markdown"`. + pub const TEXT_MARKDOWN: Encoding = Self(zenoh_protocol::core::Encoding { + id: 31, + schema: None, + }); + /// A CSV file. + /// + /// Constant alias for string: `"text/csv"`. + pub const TEXT_CSV: Encoding = Self(zenoh_protocol::core::Encoding { + id: 32, + schema: None, + }); + /// An application-specific SQL query. + /// + /// Constant alias for string: `"application/sql"`. + pub const APPLICATION_SQL: Encoding = Self(zenoh_protocol::core::Encoding { + id: 33, + schema: None, + }); + /// Constrained Application Protocol (CoAP) data intended for CoAP-to-HTTP and HTTP-to-CoAP proxies. + /// + /// Constant alias for string: `"application/coap-payload"`. + pub const APPLICATION_COAP_PAYLOAD: Encoding = Self(zenoh_protocol::core::Encoding { + id: 34, + schema: None, + }); + /// Defines a JSON document structure for expressing a sequence of operations to apply to a JSON document. + /// + /// Constant alias for string: `"application/json-patch+json"`. + pub const APPLICATION_JSON_PATCH_JSON: Encoding = Self(zenoh_protocol::core::Encoding { + id: 35, + schema: None, + }); + /// A JSON text sequence consists of any number of JSON texts, all encoded in UTF-8. + /// + /// Constant alias for string: `"application/json-seq"`. + pub const APPLICATION_JSON_SEQ: Encoding = Self(zenoh_protocol::core::Encoding { + id: 36, + schema: None, + }); + /// A JSONPath defines a string syntax for selecting and extracting JSON values from within a given JSON value. + /// + /// Constant alias for string: `"application/jsonpath"`. + pub const APPLICATION_JSONPATH: Encoding = Self(zenoh_protocol::core::Encoding { + id: 37, + schema: None, + }); + /// A JSON Web Token (JWT). + /// + /// Constant alias for string: `"application/jwt"`. + pub const APPLICATION_JWT: Encoding = Self(zenoh_protocol::core::Encoding { + id: 38, + schema: None, + }); + /// An application-specific MPEG-4 encoded data, either audio or video. + /// + /// Constant alias for string: `"application/mp4"`. + pub const APPLICATION_MP4: Encoding = Self(zenoh_protocol::core::Encoding { + id: 39, + schema: None, + }); + /// A SOAP 1.2 message serialized as XML 1.0. + /// + /// Constant alias for string: `"application/soap+xml"`. + pub const APPLICATION_SOAP_XML: Encoding = Self(zenoh_protocol::core::Encoding { + id: 40, + schema: None, + }); + /// A YANG-encoded data commonly used by the Network Configuration Protocol (NETCONF). + /// + /// Constant alias for string: `"application/yang"`. + pub const APPLICATION_YANG: Encoding = Self(zenoh_protocol::core::Encoding { + id: 41, + schema: None, + }); + /// A MPEG-4 Advanced Audio Coding (AAC) media. + /// + /// Constant alias for string: `"audio/aac"`. 
+ pub const AUDIO_AAC: Encoding = Self(zenoh_protocol::core::Encoding { + id: 42, + schema: None, + }); + /// A Free Lossless Audio Codec (FLAC) media. + /// + /// Constant alias for string: `"audio/flac"`. + pub const AUDIO_FLAC: Encoding = Self(zenoh_protocol::core::Encoding { + id: 43, + schema: None, + }); + /// An audio codec defined in MPEG-1, MPEG-2, MPEG-4, or registered at the MP4 registration authority. + /// + /// Constant alias for string: `"audio/mp4"`. + pub const AUDIO_MP4: Encoding = Self(zenoh_protocol::core::Encoding { + id: 44, + schema: None, + }); + /// An Ogg-encapsulated audio stream. + /// + /// Constant alias for string: `"audio/ogg"`. + pub const AUDIO_OGG: Encoding = Self(zenoh_protocol::core::Encoding { + id: 45, + schema: None, + }); + /// A Vorbis-encoded audio stream. + /// + /// Constant alias for string: `"audio/vorbis"`. + pub const AUDIO_VORBIS: Encoding = Self(zenoh_protocol::core::Encoding { + id: 46, + schema: None, + }); + /// A h261-encoded video stream. + /// + /// Constant alias for string: `"video/h261"`. + pub const VIDEO_H261: Encoding = Self(zenoh_protocol::core::Encoding { + id: 47, + schema: None, + }); + /// A h263-encoded video stream. + /// + /// Constant alias for string: `"video/h263"`. + pub const VIDEO_H263: Encoding = Self(zenoh_protocol::core::Encoding { + id: 48, + schema: None, + }); + /// A h264-encoded video stream. + /// + /// Constant alias for string: `"video/h264"`. + pub const VIDEO_H264: Encoding = Self(zenoh_protocol::core::Encoding { + id: 49, + schema: None, + }); + /// A h265-encoded video stream. + /// + /// Constant alias for string: `"video/h265"`. + pub const VIDEO_H265: Encoding = Self(zenoh_protocol::core::Encoding { + id: 50, + schema: None, + }); + /// A h266-encoded video stream. + /// + /// Constant alias for string: `"video/h266"`. + pub const VIDEO_H266: Encoding = Self(zenoh_protocol::core::Encoding { + id: 51, + schema: None, + }); + /// A video codec defined in MPEG-1, MPEG-2, MPEG-4, or registered at the MP4 registration authority. + /// + /// Constant alias for string: `"video/mp4"`. + pub const VIDEO_MP4: Encoding = Self(zenoh_protocol::core::Encoding { + id: 52, + schema: None, + }); + /// An Ogg-encapsulated video stream. + /// + /// Constant alias for string: `"video/ogg"`. + pub const VIDEO_OGG: Encoding = Self(zenoh_protocol::core::Encoding { + id: 53, + schema: None, + }); + /// An uncompressed, studio-quality video stream. + /// + /// Constant alias for string: `"video/raw"`. + pub const VIDEO_RAW: Encoding = Self(zenoh_protocol::core::Encoding { + id: 54, + schema: None, + }); + /// A VP8-encoded video stream. + /// + /// Constant alias for string: `"video/vp8"`. + pub const VIDEO_VP8: Encoding = Self(zenoh_protocol::core::Encoding { + id: 55, + schema: None, + }); + /// A VP9-encoded video stream. + /// + /// Constant alias for string: `"video/vp9"`. + pub const VIDEO_VP9: Encoding = Self(zenoh_protocol::core::Encoding { + id: 56, + schema: None, + }); + + const ID_TO_STR: phf::Map = phf_map! 
{ + 0u16 => "zenoh/bytes", + 1u16 => "zenoh/int", + 2u16 => "zenoh/uint", + 3u16 => "zenoh/float", + 4u16 => "zenoh/bool", + 5u16 => "zenoh/string", + 6u16 => "zenoh/error", + 7u16 => "application/octet-stream", + 8u16 => "text/plain", + 9u16 => "application/json", + 10u16 => "text/json", + 11u16 => "application/cdr", + 12u16 => "application/cbor", + 13u16 => "application/yaml", + 14u16 => "text/yaml", + 15u16 => "text/json5", + 16u16 => "application/python-serialized-object", + 17u16 => "application/protobuf", + 18u16 => "application/java-serialized-object", + 19u16 => "application/openmetrics-text", + 20u16 => "image/png", + 21u16 => "image/jpeg", + 22u16 => "image/gif", + 23u16 => "image/bmp", + 24u16 => "image/webp", + 25u16 => "application/xml", + 26u16 => "application/x-www-form-urlencoded", + 27u16 => "text/html", + 28u16 => "text/xml", + 29u16 => "text/css", + 30u16 => "text/javascript", + 31u16 => "text/markdown", + 32u16 => "text/csv", + 33u16 => "application/sql", + 34u16 => "application/coap-payload", + 35u16 => "application/json-patch+json", + 36u16 => "application/json-seq", + 37u16 => "application/jsonpath", + 38u16 => "application/jwt", + 39u16 => "application/mp4", + 40u16 => "application/soap+xml", + 41u16 => "application/yang", + 42u16 => "audio/aac", + 43u16 => "audio/flac", + 44u16 => "audio/mp4", + 45u16 => "audio/ogg", + 46u16 => "audio/vorbis", + 47u16 => "video/h261", + 48u16 => "video/h263", + 49u16 => "video/h264", + 50u16 => "video/h265", + 51u16 => "video/h266", + 52u16 => "video/mp4", + 53u16 => "video/ogg", + 54u16 => "video/raw", + 55u16 => "video/vp8", + 56u16 => "video/vp9", + }; + + const STR_TO_ID: phf::Map<&'static str, EncodingId> = phf_map! { + "zenoh/bytes" => 0u16, + "zenoh/int" => 1u16, + "zenoh/uint" => 2u16, + "zenoh/float" => 3u16, + "zenoh/bool" => 4u16, + "zenoh/string" => 5u16, + "zenoh/error" => 6u16, + "application/octet-stream" => 7u16, + "text/plain" => 8u16, + "application/json" => 9u16, + "text/json" => 10u16, + "application/cdr" => 11u16, + "application/cbor" => 12u16, + "application/yaml" => 13u16, + "text/yaml" => 14u16, + "text/json5" => 15u16, + "application/python-serialized-object" => 16u16, + "application/protobuf" => 17u16, + "application/java-serialized-object" => 18u16, + "application/openmetrics-text" => 19u16, + "image/png" => 20u16, + "image/jpeg" => 21u16, + "image/gif" => 22u16, + "image/bmp" => 23u16, + "image/webp" => 24u16, + "application/xml" => 25u16, + "application/x-www-form-urlencoded" => 26u16, + "text/html" => 27u16, + "text/xml" => 28u16, + "text/css" => 29u16, + "text/javascript" => 30u16, + "text/markdown" => 31u16, + "text/csv" => 32u16, + "application/sql" => 33u16, + "application/coap-payload" => 34u16, + "application/json-patch+json" => 35u16, + "application/json-seq" => 36u16, + "application/jsonpath" => 37u16, + "application/jwt" => 38u16, + "application/mp4" => 39u16, + "application/soap+xml" => 40u16, + "application/yang" => 41u16, + "audio/aac" => 42u16, + "audio/flac" => 43u16, + "audio/mp4" => 44u16, + "audio/ogg" => 45u16, + "audio/vorbis" => 46u16, + "video/h261" => 47u16, + "video/h263" => 48u16, + "video/h264" => 49u16, + "video/h265" => 50u16, + "video/h266" => 51u16, + "video/mp4" => 52u16, + "video/ogg" => 53u16, + "video/raw" => 54u16, + "video/vp8" => 55u16, + "video/vp9" => 56u16, + }; + + /// The default [`Encoding`] is [`ZENOH_BYTES`](Encoding::ZENOH_BYTES). + pub const fn default() -> Self { + Self::ZENOH_BYTES + } + + /// Set a schema to this encoding. 
Zenoh does not define what a schema is and its semantichs is left to the implementer. + /// E.g. a common schema for `text/plain` encoding is `utf-8`. + pub fn with_schema(mut self, s: S) -> Self + where + S: Into, + { + let s: String = s.into(); + self.0.schema = Some(s.into_boxed_str().into_boxed_bytes().into()); + self + } +} + +impl Default for Encoding { + fn default() -> Self { + Self::default() + } +} + +impl From<&str> for Encoding { + fn from(t: &str) -> Self { + let mut inner = zenoh_protocol::core::Encoding::empty(); + + // Check if empty + if t.is_empty() { + return Encoding(inner); + } + + // Everything before `;` may be mapped to a known id + let (id, schema) = t.split_once(Encoding::SCHEMA_SEP).unwrap_or((t, "")); + if let Some(id) = Encoding::STR_TO_ID.get(id).copied() { + inner.id = id; + }; + if !schema.is_empty() { + inner.schema = Some(ZSlice::from(schema.to_string().into_bytes())); + } + + Encoding(inner) + } +} + +impl From for Encoding { + fn from(value: String) -> Self { + Self::from(value.as_str()) + } +} + +impl FromStr for Encoding { + type Err = Infallible; + + fn from_str(s: &str) -> Result { + Ok(Self::from(s)) + } +} + +impl From<&Encoding> for Cow<'static, str> { + fn from(encoding: &Encoding) -> Self { + fn su8_to_str(schema: &[u8]) -> &str { + std::str::from_utf8(schema).unwrap_or("unknown(non-utf8)") + } + + match ( + Encoding::ID_TO_STR.get(&encoding.0.id).copied(), + encoding.0.schema.as_ref(), + ) { + // Perfect match + (Some(i), None) => Cow::Borrowed(i), + // ID and schema + (Some(i), Some(s)) => { + Cow::Owned(format!("{}{}{}", i, Encoding::SCHEMA_SEP, su8_to_str(s))) + } + // + (None, Some(s)) => Cow::Owned(format!( + "unknown({}){}{}", + encoding.0.id, + Encoding::SCHEMA_SEP, + su8_to_str(s) + )), + (None, None) => Cow::Owned(format!("unknown({})", encoding.0.id)), + } + } +} + +impl From for Cow<'static, str> { + fn from(encoding: Encoding) -> Self { + Self::from(&encoding) + } +} + +impl From for String { + fn from(encoding: Encoding) -> Self { + encoding.to_string() + } +} + +impl From for zenoh_protocol::core::Encoding { + fn from(value: Encoding) -> Self { + value.0 + } +} + +impl From for Encoding { + fn from(value: zenoh_protocol::core::Encoding) -> Self { + Self(value) + } +} + +impl fmt::Display for Encoding { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result { + let s = Cow::from(self); + f.write_str(s.as_ref()) + } +} + +// - Encoding trait +pub trait EncodingMapping { + const ENCODING: Encoding; +} + +// Bytes +impl EncodingMapping for Payload { + const ENCODING: Encoding = Encoding::ZENOH_BYTES; +} + +impl EncodingMapping for ZBuf { + const ENCODING: Encoding = Encoding::ZENOH_BYTES; +} + +impl EncodingMapping for Vec { + const ENCODING: Encoding = Encoding::ZENOH_BYTES; +} + +impl EncodingMapping for &[u8] { + const ENCODING: Encoding = Encoding::ZENOH_BYTES; +} + +impl EncodingMapping for Cow<'_, [u8]> { + const ENCODING: Encoding = Encoding::ZENOH_BYTES; +} + +// String +impl EncodingMapping for String { + const ENCODING: Encoding = Encoding::ZENOH_STRING; +} + +impl EncodingMapping for &str { + const ENCODING: Encoding = Encoding::ZENOH_STRING; +} + +impl EncodingMapping for Cow<'_, str> { + const ENCODING: Encoding = Encoding::ZENOH_STRING; +} + +// Zenoh unsigned integers +impl EncodingMapping for u8 { + const ENCODING: Encoding = Encoding::ZENOH_UINT; +} + +impl EncodingMapping for u16 { + const ENCODING: Encoding = Encoding::ZENOH_UINT; +} + +impl EncodingMapping for u32 { + const ENCODING: Encoding = 
Encoding::ZENOH_UINT; +} + +impl EncodingMapping for u64 { + const ENCODING: Encoding = Encoding::ZENOH_UINT; +} + +impl EncodingMapping for usize { + const ENCODING: Encoding = Encoding::ZENOH_UINT; +} + +// Zenoh signed integers +impl EncodingMapping for i8 { + const ENCODING: Encoding = Encoding::ZENOH_INT; +} + +impl EncodingMapping for i16 { + const ENCODING: Encoding = Encoding::ZENOH_INT; +} + +impl EncodingMapping for i32 { + const ENCODING: Encoding = Encoding::ZENOH_INT; +} + +impl EncodingMapping for i64 { + const ENCODING: Encoding = Encoding::ZENOH_INT; +} + +impl EncodingMapping for isize { + const ENCODING: Encoding = Encoding::ZENOH_INT; +} + +// Zenoh floats +impl EncodingMapping for f32 { + const ENCODING: Encoding = Encoding::ZENOH_FLOAT; +} + +impl EncodingMapping for f64 { + const ENCODING: Encoding = Encoding::ZENOH_FLOAT; +} + +// Zenoh bool +impl EncodingMapping for bool { + const ENCODING: Encoding = Encoding::ZENOH_BOOL; +} + +// - Zenoh advanced types encoders/decoders +impl EncodingMapping for serde_json::Value { + const ENCODING: Encoding = Encoding::APPLICATION_JSON; +} + +impl EncodingMapping for serde_yaml::Value { + const ENCODING: Encoding = Encoding::APPLICATION_YAML; +} + +impl EncodingMapping for serde_cbor::Value { + const ENCODING: Encoding = Encoding::APPLICATION_CBOR; +} + +impl EncodingMapping for serde_pickle::Value { + const ENCODING: Encoding = Encoding::APPLICATION_PYTHON_SERIALIZED_OBJECT; +} + +// - Zenoh SHM +#[cfg(feature = "shared-memory")] +impl EncodingMapping for Arc { + const ENCODING: Encoding = Encoding::ZENOH_BYTES; +} + +#[cfg(feature = "shared-memory")] +impl EncodingMapping for Box { + const ENCODING: Encoding = Encoding::ZENOH_BYTES; +} + +#[cfg(feature = "shared-memory")] +impl EncodingMapping for SharedMemoryBuf { + const ENCODING: Encoding = Encoding::ZENOH_BYTES; +} diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 0a8f1feb64..bae81d3a54 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -53,7 +53,7 @@ //! let session = zenoh::open(config::default()).res().await.unwrap(); //! let subscriber = session.declare_subscriber("key/expression").res().await.unwrap(); //! while let Ok(sample) = subscriber.recv_async().await { -//! println!("Received: {}", sample); +//! println!("Received: {:?}", sample); //! }; //! } //! ``` @@ -79,9 +79,11 @@ extern crate zenoh_core; #[macro_use] extern crate zenoh_result; +pub(crate) type Id = usize; + use git_version::git_version; use handlers::DefaultHandler; -#[zenoh_macros::unstable] +#[cfg(feature = "unstable")] use net::runtime::Runtime; use prelude::*; use scouting::ScoutBuilder; @@ -132,10 +134,12 @@ pub use net::runtime; pub mod selector; #[deprecated = "This module is now a separate crate. Use the crate directly for shorter compile-times"] pub use zenoh_config as config; +pub(crate) mod encoding; pub mod handlers; pub mod info; #[cfg(feature = "unstable")] pub mod liveliness; +pub mod payload; pub mod plugins; pub mod prelude; pub mod publication; @@ -168,23 +172,6 @@ pub mod time { } } -/// A map of key/value (String,String) properties. -pub mod properties { - use super::prelude::Value; - pub use zenoh_collections::Properties; - - /// Convert a set of [`Properties`] into a [`Value`]. 
- /// For instance, Properties: `[("k1", "v1"), ("k2, v2")]` - /// is converted into Json: `{ "k1": "v1", "k2": "v2" }` - pub fn properties_to_json_value(props: &Properties) -> Value { - let json_map = props - .iter() - .map(|(k, v)| (k.clone(), serde_json::Value::String(v.clone()))) - .collect::>(); - serde_json::Value::Object(json_map).into() - } -} - /// Scouting primitives. pub mod scouting; diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index 26a803fa43..9f14866363 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -15,8 +15,7 @@ //! Liveliness primitives. //! //! see [`Liveliness`] - -use crate::query::Reply; +use crate::{query::Reply, Id}; #[zenoh_macros::unstable] use { @@ -426,7 +425,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") - /// .callback(|sample| { println!("Received: {} {}", sample.key_expr, sample.value); }) + /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr, sample.payload); }) /// .res() /// .await /// .unwrap(); @@ -500,7 +499,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { - /// println!("Received: {} {}", sample.key_expr, sample.value); + /// println!("Received: {} {:?}", sample.key_expr, sample.payload); /// } /// # }) /// ``` @@ -508,7 +507,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { #[zenoh_macros::unstable] pub fn with(self, handler: Handler) -> LivelinessSubscriberBuilder<'a, 'b, Handler> where - Handler: crate::prelude::IntoCallbackReceiverPair<'static, Sample>, + Handler: crate::handlers::IntoCallbackReceiverPair<'static, Sample>, { let LivelinessSubscriberBuilder { session, @@ -594,8 +593,8 @@ where /// .unwrap(); /// while let Ok(token) = tokens.recv_async().await { /// match token.sample { -/// Ok(sample) => println!("Alive token ('{}')", sample.key_expr.as_str(),), -/// Err(err) => println!("Received (ERROR: '{}')", String::try_from(&err).unwrap()), +/// Ok(sample) => println!("Alive token ('{}')", sample.key_expr.as_str()), +/// Err(err) => println!("Received (ERROR: '{:?}')", err.payload), /// } /// } /// # }) diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index e8e84395f8..b0f7f7f7ef 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -559,7 +559,7 @@ pub fn route_query( payload: ReplyBody::Put(Put { // @TODO: handle Del case timestamp: None, // @TODO: handle timestamp - encoding: Encoding::DEFAULT, // @TODO: handle encoding + encoding: Encoding::empty(), // @TODO: handle encoding ext_sinfo: None, // @TODO: handle source info ext_attachment: None, // @TODO: expose it in the API #[cfg(feature = "shared-memory")] diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index cd7cf448cd..03b447aae0 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -12,8 +12,10 @@ // ZettaScale Zenoh Team, use super::routing::dispatcher::face::Face; use super::Runtime; +use crate::encoding::Encoding; use crate::key_expr::KeyExpr; use crate::net::primitives::Primitives; +use crate::payload::Payload; use crate::plugins::sealed::{self as plugins}; use crate::prelude::sync::{Sample, SyncResolve}; use crate::queryable::Query; @@ -30,9 +32,11 
@@ use std::sync::Mutex; use zenoh_buffers::buffer::SplitBuffer; use zenoh_config::{ConfigValidator, ValidatedMap, WhatAmI}; use zenoh_plugin_trait::{PluginControl, PluginStatus}; -use zenoh_protocol::core::key_expr::keyexpr; use zenoh_protocol::{ - core::{key_expr::OwnedKeyExpr, ExprId, KnownEncoding, WireExpr, ZenohId, EMPTY_EXPR_ID}, + core::{ + key_expr::{keyexpr, OwnedKeyExpr}, + ExprId, WireExpr, ZenohId, EMPTY_EXPR_ID, + }, network::{ declare::{queryable::ext::QueryableInfo, subscriber::ext::SubscriberInfo}, ext, Declare, DeclareBody, DeclareQueryable, DeclareSubscriber, Push, Request, Response, @@ -420,7 +424,7 @@ impl Primitives for AdminSpace { parameters, value: query .ext_body - .map(|b| Value::from(b.payload).encoding(b.encoding)), + .map(|b| Value::from(b.payload).with_encoding(b.encoding)), qid: msg.id, zid, primitives, @@ -561,13 +565,18 @@ fn router_data(context: &AdminContext, query: Query) { } log::trace!("AdminSpace router_data: {:?}", json); + let payload = match Payload::try_from(json) { + Ok(p) => p, + Err(e) => { + log::error!("Error serializing AdminSpace reply: {:?}", e); + return; + } + }; if let Err(e) = query - .reply(Ok(Sample::new( - reply_key, - Value::from(json.to_string().as_bytes().to_vec()) - .encoding(KnownEncoding::AppJson.into()), - ))) - .res() + .reply(Ok( + Sample::new(reply_key, payload).with_encoding(Encoding::APPLICATION_JSON) + )) + .res_sync() { log::error!("Error sending AdminSpace reply: {:?}", e); } @@ -596,13 +605,7 @@ zenoh_build{{version="{}"}} 1 .openmetrics_text(), ); - if let Err(e) = query - .reply(Ok(Sample::new( - reply_key, - Value::from(metrics.as_bytes().to_vec()).encoding(KnownEncoding::TextPlain.into()), - ))) - .res() - { + if let Err(e) = query.reply(Ok(Sample::new(reply_key, metrics))).res() { log::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -617,14 +620,7 @@ fn routers_linkstate_data(context: &AdminContext, query: Query) { if let Err(e) = query .reply(Ok(Sample::new( reply_key, - Value::from( - tables - .hat_code - .info(&tables, WhatAmI::Router) - .as_bytes() - .to_vec(), - ) - .encoding(KnownEncoding::TextPlain.into()), + tables.hat_code.info(&tables, WhatAmI::Router), ))) .res() { @@ -642,14 +638,7 @@ fn peers_linkstate_data(context: &AdminContext, query: Query) { if let Err(e) = query .reply(Ok(Sample::new( reply_key, - Value::from( - tables - .hat_code - .info(&tables, WhatAmI::Peer) - .as_bytes() - .to_vec(), - ) - .encoding(KnownEncoding::TextPlain.into()), + tables.hat_code.info(&tables, WhatAmI::Peer), ))) .res() { @@ -667,7 +656,7 @@ fn subscribers_data(context: &AdminContext, query: Query) { )) .unwrap(); if query.key_expr().intersects(&key) { - if let Err(e) = query.reply(Ok(Sample::new(key, Value::empty()))).res() { + if let Err(e) = query.reply(Ok(Sample::new(key, Payload::empty()))).res() { log::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -684,7 +673,7 @@ fn queryables_data(context: &AdminContext, query: Query) { )) .unwrap(); if query.key_expr().intersects(&key) { - if let Err(e) = query.reply(Ok(Sample::new(key, Value::empty()))).res() { + if let Err(e) = query.reply(Ok(Sample::new(key, Payload::empty()))).res() { log::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -702,8 +691,13 @@ fn plugins_data(context: &AdminContext, query: Query) { log::debug!("plugin status: {:?}", status); let key = root_key.join(status.name()).unwrap(); let status = serde_json::to_value(status).unwrap(); - if let Err(e) = query.reply(Ok(Sample::new(key, Value::from(status)))).res() { - 
log::error!("Error sending AdminSpace reply: {:?}", e); + match Payload::try_from(status) { + Ok(zbuf) => { + if let Err(e) = query.reply(Ok(Sample::new(key, zbuf))).res_sync() { + log::error!("Error sending AdminSpace reply: {:?}", e); + } + } + Err(e) => log::debug!("Admin query error: {}", e), } } } @@ -720,12 +714,7 @@ fn plugins_status(context: &AdminContext, query: Query) { with_extended_string(plugin_key, &["/__path__"], |plugin_path_key| { if let Ok(key_expr) = KeyExpr::try_from(plugin_path_key.clone()) { if query.key_expr().intersects(&key_expr) { - if let Err(e) = query - .reply(Ok(Sample::new( - key_expr, - Value::from(plugin.path()).encoding(KnownEncoding::AppJson.into()), - ))) - .res() + if let Err(e) = query.reply(Ok(Sample::new(key_expr, plugin.path()))).res() { log::error!("Error sending AdminSpace reply: {:?}", e); } @@ -748,13 +737,13 @@ fn plugins_status(context: &AdminContext, query: Query) { Ok(Ok(responses)) => { for response in responses { if let Ok(key_expr) = KeyExpr::try_from(response.key) { - if let Err(e) = query.reply(Ok(Sample::new( - key_expr, - Value::from(response.value).encoding(KnownEncoding::AppJson.into()), - ))) - .res() - { - log::error!("Error sending AdminSpace reply: {:?}", e); + match Payload::try_from(response.value) { + Ok(zbuf) => { + if let Err(e) = query.reply(Ok(Sample::new(key_expr, zbuf))).res_sync() { + log::error!("Error sending AdminSpace reply: {:?}", e); + } + }, + Err(e) => log::debug!("Admin query error: {}", e), } } else { log::error!("Error: plugin {} replied with an invalid key", plugin_key); diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index fdf0b6fe65..80a9dd458a 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -628,7 +628,7 @@ fn client_test() { ext::QoSType::DEFAULT, PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -661,7 +661,7 @@ fn client_test() { ext::QoSType::DEFAULT, PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -694,7 +694,7 @@ fn client_test() { ext::QoSType::DEFAULT, PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -727,7 +727,7 @@ fn client_test() { ext::QoSType::DEFAULT, PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -760,7 +760,7 @@ fn client_test() { ext::QoSType::DEFAULT, PushBody::Put(Put { timestamp: None, - encoding: Encoding::DEFAULT, + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs new file mode 100644 index 0000000000..f499db50da --- /dev/null +++ b/zenoh/src/payload.rs @@ -0,0 +1,673 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +//! Payload primitives. 
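Editorial note: the new `payload` module introduced below centers on `Payload::serialize`/`Payload::deserialize`, backed by the `ZSerde` serializer. A minimal round-trip sketch over two of the primitive types wired up in this file:

```rust
use zenoh::payload::Payload;

fn main() {
    // Integers are encoded little-endian with trailing zero bytes trimmed.
    let p = Payload::serialize(42u64);
    let n: u64 = p.deserialize().unwrap();
    assert_eq!(n, 42);

    // Strings round-trip as UTF-8 bytes.
    let p = Payload::serialize(String::from("hello zenoh"));
    let s: String = p.deserialize().unwrap();
    assert_eq!(s, "hello zenoh");
}
```

Because trailing zero bytes are trimmed on serialization, small integer values occupy a single byte in the resulting payload.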
+use crate::buffers::ZBuf; +use std::{ + borrow::Cow, + convert::Infallible, + fmt::Debug, + ops::{Deref, DerefMut}, + string::FromUtf8Error, + sync::Arc, +}; +use zenoh_buffers::{buffer::SplitBuffer, reader::HasReader, writer::HasWriter, ZSlice}; +use zenoh_result::ZResult; +#[cfg(feature = "shared-memory")] +use zenoh_shm::SharedMemoryBuf; + +#[repr(transparent)] +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct Payload(ZBuf); + +impl Payload { + /// Create an empty payload. + pub const fn empty() -> Self { + Self(ZBuf::empty()) + } + + /// Create a [`Payload`] from any type `T` that can implements [`Into`]. + pub fn new(t: T) -> Self + where + T: Into, + { + Self(t.into()) + } +} + +impl Deref for Payload { + type Target = ZBuf; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for Payload { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +/// Provide some facilities specific to the Rust API to encode/decode a [`Value`] with an `Serialize`. +impl Payload { + /// Encode an object of type `T` as a [`Value`] using the [`ZSerde`]. + /// + /// ```rust + /// use zenoh::payload::Payload; + /// + /// let start = String::from("abc"); + /// let payload = Payload::serialize(start.clone()); + /// let end: String = payload.deserialize().unwrap(); + /// assert_eq!(start, end); + /// ``` + pub fn serialize(t: T) -> Self + where + ZSerde: Serialize, + { + ZSerde.serialize(t) + } + + /// Decode an object of type `T` from a [`Value`] using the [`ZSerde`]. + /// See [encode](Value::encode) for an example. + pub fn deserialize(&self) -> ZResult + where + ZSerde: Deserialize, + >::Error: Debug, + { + let t: T = ZSerde.deserialize(self).map_err(|e| zerror!("{:?}", e))?; + Ok(t) + } +} + +/// Trait to encode a type `T` into a [`Value`]. +pub trait Serialize { + type Output; + + /// The implementer should take care of serializing the type `T` and set the proper [`Encoding`]. + fn serialize(self, t: T) -> Self::Output; +} + +pub trait Deserialize { + type Error; + + /// The implementer should take care of deserializing the type `T` based on the [`Encoding`] information. + fn deserialize(self, t: &Payload) -> Result; +} + +/// The default serializer for Zenoh payload. It supports primitives types, such as: vec, int, uint, float, string, bool. +/// It also supports common Rust serde values. 
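Editorial note: `ZSerde` can also be used directly instead of going through the `Payload` helpers. A short sketch under the definitions in this file, including the error path when a payload does not fit the requested integer width:

```rust
use zenoh::payload::{Deserialize, Payload, Serialize, ZSerde};

fn main() {
    // Direct use of the serializer/deserializer pair.
    let p: Payload = ZSerde.serialize(7u16);
    let n: u16 = ZSerde.deserialize(&p).unwrap();
    assert_eq!(n, 7);

    // An over-long payload cannot be deserialized into a small integer.
    let p = Payload::serialize(String::from("definitely not a u8"));
    let r: Result<u8, _> = ZSerde.deserialize(&p);
    assert!(r.is_err());
}
```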
+#[derive(Clone, Copy, Debug)] +pub struct ZSerde; + +#[derive(Debug, Clone, Copy)] +pub struct ZDeserializeError; + +// Bytes +impl Serialize for ZSerde { + type Output = Payload; + + fn serialize(self, t: ZBuf) -> Self::Output { + Payload::new(t) + } +} + +impl From for ZBuf { + fn from(value: Payload) -> Self { + value.0 + } +} + +impl Deserialize for ZSerde { + type Error = Infallible; + + fn deserialize(self, v: &Payload) -> Result { + Ok(v.into()) + } +} + +impl From<&Payload> for ZBuf { + fn from(value: &Payload) -> Self { + value.0.clone() + } +} + +impl Serialize> for ZSerde { + type Output = Payload; + + fn serialize(self, t: Vec) -> Self::Output { + Payload::new(t) + } +} + +impl Serialize<&[u8]> for ZSerde { + type Output = Payload; + + fn serialize(self, t: &[u8]) -> Self::Output { + Payload::new(t.to_vec()) + } +} + +impl Deserialize> for ZSerde { + type Error = Infallible; + + fn deserialize(self, v: &Payload) -> Result, Self::Error> { + let v: ZBuf = v.into(); + Ok(v.contiguous().to_vec()) + } +} + +impl From<&Payload> for Vec { + fn from(value: &Payload) -> Self { + value.contiguous().to_vec() + } +} + +impl<'a> Serialize> for ZSerde { + type Output = Payload; + + fn serialize(self, t: Cow<'a, [u8]>) -> Self::Output { + Payload::new(t.to_vec()) + } +} + +impl<'a> Deserialize> for ZSerde { + type Error = Infallible; + + fn deserialize(self, v: &Payload) -> Result, Self::Error> { + let v: Vec = Self.deserialize(v)?; + Ok(Cow::Owned(v)) + } +} + +impl<'a> From<&'a Payload> for Cow<'a, [u8]> { + fn from(value: &'a Payload) -> Self { + value.contiguous() + } +} + +// String +impl Serialize for ZSerde { + type Output = Payload; + + fn serialize(self, s: String) -> Self::Output { + Payload::new(s.into_bytes()) + } +} + +impl Serialize<&str> for ZSerde { + type Output = Payload; + + fn serialize(self, s: &str) -> Self::Output { + Self.serialize(s.to_string()) + } +} + +impl Deserialize for ZSerde { + type Error = FromUtf8Error; + + fn deserialize(self, v: &Payload) -> Result { + String::from_utf8(v.contiguous().to_vec()) + } +} + +impl TryFrom<&Payload> for String { + type Error = FromUtf8Error; + + fn try_from(value: &Payload) -> Result { + ZSerde.deserialize(value) + } +} + +impl TryFrom for String { + type Error = FromUtf8Error; + + fn try_from(value: Payload) -> Result { + ZSerde.deserialize(&value) + } +} + +impl<'a> Serialize> for ZSerde { + type Output = Payload; + + fn serialize(self, s: Cow<'a, str>) -> Self::Output { + Self.serialize(s.to_string()) + } +} + +impl<'a> Deserialize> for ZSerde { + type Error = FromUtf8Error; + + fn deserialize(self, v: &Payload) -> Result, Self::Error> { + let v: String = Self.deserialize(v)?; + Ok(Cow::Owned(v)) + } +} + +impl TryFrom<&Payload> for Cow<'_, str> { + type Error = FromUtf8Error; + + fn try_from(value: &Payload) -> Result { + ZSerde.deserialize(value) + } +} + +// - Integers impl +macro_rules! 
impl_int { + ($t:ty, $encoding:expr) => { + impl Serialize<$t> for ZSerde { + type Output = Payload; + + fn serialize(self, t: $t) -> Self::Output { + let bs = t.to_le_bytes(); + let end = 1 + bs.iter().rposition(|b| *b != 0).unwrap_or(bs.len() - 1); + // SAFETY: + // - 0 is a valid start index because bs is guaranteed to always have a length greater or equal than 1 + // - end is a valid end index because is bounded between 0 and bs.len() + Payload::new(unsafe { ZSlice::new_unchecked(Arc::new(bs), 0, end) }) + } + } + + impl Serialize<&$t> for ZSerde { + type Output = Payload; + + fn serialize(self, t: &$t) -> Self::Output { + Self.serialize(*t) + } + } + + impl Serialize<&mut $t> for ZSerde { + type Output = Payload; + + fn serialize(self, t: &mut $t) -> Self::Output { + Self.serialize(*t) + } + } + + impl Deserialize<$t> for ZSerde { + type Error = ZDeserializeError; + + fn deserialize(self, v: &Payload) -> Result<$t, Self::Error> { + let p = v.contiguous(); + let mut bs = (0 as $t).to_le_bytes(); + if p.len() > bs.len() { + return Err(ZDeserializeError); + } + bs[..p.len()].copy_from_slice(&p); + let t = <$t>::from_le_bytes(bs); + Ok(t) + } + } + + impl TryFrom<&Payload> for $t { + type Error = ZDeserializeError; + + fn try_from(value: &Payload) -> Result { + ZSerde.deserialize(value) + } + } + }; +} + +// Zenoh unsigned integers +impl_int!(u8, ZSerde::ZENOH_UINT); +impl_int!(u16, ZSerde::ZENOH_UINT); +impl_int!(u32, ZSerde::ZENOH_UINT); +impl_int!(u64, ZSerde::ZENOH_UINT); +impl_int!(usize, ZSerde::ZENOH_UINT); + +// Zenoh signed integers +impl_int!(i8, ZSerde::ZENOH_INT); +impl_int!(i16, ZSerde::ZENOH_INT); +impl_int!(i32, ZSerde::ZENOH_INT); +impl_int!(i64, ZSerde::ZENOH_INT); +impl_int!(isize, ZSerde::ZENOH_INT); + +// Zenoh floats +impl_int!(f32, ZSerde::ZENOH_FLOAT); +impl_int!(f64, ZSerde::ZENOH_FLOAT); + +// Zenoh bool +impl Serialize for ZSerde { + type Output = ZBuf; + + fn serialize(self, t: bool) -> Self::Output { + // SAFETY: casting a bool into an integer is well-defined behaviour. 
+ // 0 is false, 1 is true: https://doc.rust-lang.org/std/primitive.bool.html + ZBuf::from((t as u8).to_le_bytes()) + } +} + +impl Deserialize for ZSerde { + type Error = ZDeserializeError; + + fn deserialize(self, v: &Payload) -> Result { + let p = v.contiguous(); + if p.len() != 1 { + return Err(ZDeserializeError); + } + match p[0] { + 0 => Ok(false), + 1 => Ok(true), + _ => Err(ZDeserializeError), + } + } +} + +impl TryFrom<&Payload> for bool { + type Error = ZDeserializeError; + + fn try_from(value: &Payload) -> Result { + ZSerde.deserialize(value) + } +} + +// - Zenoh advanced types encoders/decoders +// JSON +impl Serialize<&serde_json::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &serde_json::Value) -> Self::Output { + let mut payload = Payload::empty(); + serde_json::to_writer(payload.writer(), t)?; + Ok(payload) + } +} + +impl Serialize for ZSerde { + type Output = Result; + + fn serialize(self, t: serde_json::Value) -> Self::Output { + Self.serialize(&t) + } +} + +impl Deserialize for ZSerde { + type Error = serde_json::Error; + + fn deserialize(self, v: &Payload) -> Result { + serde_json::from_reader(v.reader()) + } +} + +impl TryFrom for Payload { + type Error = serde_json::Error; + + fn try_from(value: serde_json::Value) -> Result { + ZSerde.serialize(value) + } +} + +// Yaml +impl Serialize<&serde_yaml::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &serde_yaml::Value) -> Self::Output { + let mut payload = Payload::empty(); + serde_yaml::to_writer(payload.writer(), t)?; + Ok(payload) + } +} + +impl Serialize for ZSerde { + type Output = Result; + + fn serialize(self, t: serde_yaml::Value) -> Self::Output { + Self.serialize(&t) + } +} + +impl Deserialize for ZSerde { + type Error = serde_yaml::Error; + + fn deserialize(self, v: &Payload) -> Result { + serde_yaml::from_reader(v.reader()) + } +} + +impl TryFrom for Payload { + type Error = serde_yaml::Error; + + fn try_from(value: serde_yaml::Value) -> Result { + ZSerde.serialize(value) + } +} + +// CBOR +impl Serialize<&serde_cbor::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &serde_cbor::Value) -> Self::Output { + let mut payload = Payload::empty(); + serde_cbor::to_writer(payload.writer(), t)?; + Ok(payload) + } +} + +impl Serialize for ZSerde { + type Output = Result; + + fn serialize(self, t: serde_cbor::Value) -> Self::Output { + Self.serialize(&t) + } +} + +impl Deserialize for ZSerde { + type Error = serde_cbor::Error; + + fn deserialize(self, v: &Payload) -> Result { + serde_cbor::from_reader(v.reader()) + } +} + +impl TryFrom for Payload { + type Error = serde_cbor::Error; + + fn try_from(value: serde_cbor::Value) -> Result { + ZSerde.serialize(value) + } +} + +// Pickle +impl Serialize<&serde_pickle::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &serde_pickle::Value) -> Self::Output { + let mut payload = Payload::empty(); + serde_pickle::value_to_writer( + &mut payload.writer(), + t, + serde_pickle::SerOptions::default(), + )?; + Ok(payload) + } +} + +impl Serialize for ZSerde { + type Output = Result; + + fn serialize(self, t: serde_pickle::Value) -> Self::Output { + Self.serialize(&t) + } +} + +impl Deserialize for ZSerde { + type Error = serde_pickle::Error; + + fn deserialize(self, v: &Payload) -> Result { + serde_pickle::value_from_reader(v.reader(), serde_pickle::DeOptions::default()) + } +} + +impl TryFrom for Payload { + type Error = serde_pickle::Error; + + fn try_from(value: serde_pickle::Value) -> Result { + 
ZSerde.serialize(value) + } +} + +// Shared memory conversion +#[cfg(feature = "shared-memory")] +impl Serialize> for ZSerde { + type Output = Payload; + + fn serialize(self, t: Arc) -> Self::Output { + Payload::new(t) + } +} + +#[cfg(feature = "shared-memory")] +impl Serialize> for ZSerde { + type Output = Payload; + + fn serialize(self, t: Box) -> Self::Output { + let smb: Arc = t.into(); + Self.serialize(smb) + } +} + +#[cfg(feature = "shared-memory")] +impl Serialize for ZSerde { + type Output = Payload; + + fn serialize(self, t: SharedMemoryBuf) -> Self::Output { + Payload::new(t) + } +} + +impl From for Payload +where + ZSerde: Serialize, +{ + fn from(t: T) -> Self { + ZSerde.serialize(t) + } +} + +// For convenience to always convert a Value the examples +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum StringOrBase64 { + String(String), + Base64(String), +} + +impl Deref for StringOrBase64 { + type Target = String; + + fn deref(&self) -> &Self::Target { + match self { + Self::String(s) | Self::Base64(s) => s, + } + } +} + +impl std::fmt::Display for StringOrBase64 { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(self) + } +} + +impl From for StringOrBase64 { + fn from(v: Payload) -> Self { + use base64::{engine::general_purpose::STANDARD as b64_std_engine, Engine}; + match v.deserialize::() { + Ok(s) => StringOrBase64::String(s), + Err(_) => StringOrBase64::Base64(b64_std_engine.encode(v.contiguous())), + } + } +} + +mod tests { + #[test] + fn serializer() { + use super::Payload; + use rand::Rng; + use zenoh_buffers::ZBuf; + + const NUM: usize = 1_000; + + macro_rules! serialize_deserialize { + ($t:ty, $in:expr) => { + let i = $in; + let t = i.clone(); + let v = Payload::serialize(t); + let o: $t = v.deserialize().unwrap(); + assert_eq!(i, o) + }; + } + + let mut rng = rand::thread_rng(); + + serialize_deserialize!(u8, u8::MIN); + serialize_deserialize!(u16, u16::MIN); + serialize_deserialize!(u32, u32::MIN); + serialize_deserialize!(u64, u64::MIN); + serialize_deserialize!(usize, usize::MIN); + + serialize_deserialize!(u8, u8::MAX); + serialize_deserialize!(u16, u16::MAX); + serialize_deserialize!(u32, u32::MAX); + serialize_deserialize!(u64, u64::MAX); + serialize_deserialize!(usize, usize::MAX); + + for _ in 0..NUM { + serialize_deserialize!(u8, rng.gen::()); + serialize_deserialize!(u16, rng.gen::()); + serialize_deserialize!(u32, rng.gen::()); + serialize_deserialize!(u64, rng.gen::()); + serialize_deserialize!(usize, rng.gen::()); + } + + serialize_deserialize!(i8, i8::MIN); + serialize_deserialize!(i16, i16::MIN); + serialize_deserialize!(i32, i32::MIN); + serialize_deserialize!(i64, i64::MIN); + serialize_deserialize!(isize, isize::MIN); + + serialize_deserialize!(i8, i8::MAX); + serialize_deserialize!(i16, i16::MAX); + serialize_deserialize!(i32, i32::MAX); + serialize_deserialize!(i64, i64::MAX); + serialize_deserialize!(isize, isize::MAX); + + for _ in 0..NUM { + serialize_deserialize!(i8, rng.gen::()); + serialize_deserialize!(i16, rng.gen::()); + serialize_deserialize!(i32, rng.gen::()); + serialize_deserialize!(i64, rng.gen::()); + serialize_deserialize!(isize, rng.gen::()); + } + + serialize_deserialize!(f32, f32::MIN); + serialize_deserialize!(f64, f64::MIN); + + serialize_deserialize!(f32, f32::MAX); + serialize_deserialize!(f64, f64::MAX); + + for _ in 0..NUM { + serialize_deserialize!(f32, rng.gen::()); + serialize_deserialize!(f64, rng.gen::()); + } + + serialize_deserialize!(String, ""); + serialize_deserialize!(String, 
String::from("abcdefghijklmnopqrstuvwxyz")); + + serialize_deserialize!(Vec, vec![0u8; 0]); + serialize_deserialize!(Vec, vec![0u8; 64]); + + serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 0])); + serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 64])); + } +} diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index ad28470f63..59a4bbd96e 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -31,21 +31,20 @@ pub(crate) mod common { writer::HasWriter, }; pub use zenoh_core::Resolve; - - pub(crate) type Id = usize; + pub use zenoh_protocol::core::{EndPoint, Locator, ZenohId}; pub use crate::config::{self, Config, ValidatedMap}; pub use crate::handlers::IntoCallbackReceiverPair; - pub use crate::selector::{Parameter, Parameters, Selector}; pub use crate::session::{Session, SessionDeclarations}; - pub use crate::query::{QueryConsolidation, QueryTarget}; + pub use crate::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; + pub use crate::selector::{Parameter, Parameters, Selector}; - pub use crate::value::Value; + pub use crate::encoding::Encoding; /// The encoding of a zenoh `Value`. - pub use zenoh_protocol::core::{Encoding, KnownEncoding}; + pub use crate::payload::{Deserialize, Payload, Serialize}; + pub use crate::value::Value; - pub use crate::query::ConsolidationMode; #[zenoh_macros::unstable] pub use crate::sample::Locality; #[cfg(not(feature = "unstable"))] @@ -56,13 +55,6 @@ pub(crate) mod common { #[zenoh_macros::unstable] pub use crate::publication::PublisherDeclarations; pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; - - /// A [`Locator`] contains a choice of protocol, an address and port, as well as optional additional properties to work with. - pub use zenoh_protocol::core::EndPoint; - /// A [`Locator`] contains a choice of protocol, an address and port, as well as optional additional properties to work with. - pub use zenoh_protocol::core::Locator; - /// The global unique id of a zenoh peer. - pub use zenoh_protocol::core::ZenohId; } /// Prelude to import when using Zenoh's sync API. diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 4d45c3919d..9fb4bdf6c3 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -13,21 +13,24 @@ // //! Publishing primitives. 
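Taken together, the `ZSerde` implementations above give `Payload` infallible conversions for primitive types and fallible ones for the serde formats. A small round-trip sketch in the spirit of the `serializer` test above, assuming a build of this branch with `serde_json` available; the values are illustrative:

    use zenoh::payload::Payload;

    fn payload_roundtrips() {
        // Infallible conversions go through Payload::serialize()/deserialize().
        let p = Payload::serialize(42u32);
        assert_eq!(p.deserialize::<u32>().unwrap(), 42u32);

        let p = Payload::serialize(String::from("hello"));
        assert_eq!(p.deserialize::<String>().unwrap(), "hello");

        // Fallible conversions (e.g. serde_json::Value) go through TryFrom instead.
        let json = serde_json::json!({ "status": "ok" });
        let p = Payload::try_from(json.clone()).unwrap();
        let back: serde_json::Value = p.deserialize().unwrap();
        assert_eq!(back, json);
    }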
-#[zenoh_macros::unstable] -use crate::handlers::Callback; -#[zenoh_macros::unstable] -use crate::handlers::DefaultHandler; +use crate::encoding::Encoding; +use crate::key_expr::KeyExpr; use crate::net::primitives::Primitives; -use crate::prelude::*; +use crate::payload::Payload; #[zenoh_macros::unstable] use crate::sample::Attachment; -use crate::sample::DataInfo; -use crate::sample::QoS; -use crate::Encoding; +use crate::sample::{DataInfo, QoS, Sample, SampleKind}; +use crate::Locality; use crate::SessionRef; use crate::Undeclarable; +#[cfg(feature = "unstable")] +use crate::{ + handlers::{Callback, DefaultHandler, IntoCallbackReceiverPair}, + Id, +}; use std::future::Ready; use zenoh_core::{zread, AsyncResolve, Resolvable, Resolve, SyncResolve}; +use zenoh_keyexpr::keyexpr; use zenoh_protocol::network::push::ext; use zenoh_protocol::network::Mapping; use zenoh_protocol::network::Push; @@ -67,8 +70,8 @@ pub type DeleteBuilder<'a, 'b> = PutBuilder<'a, 'b>; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// session -/// .put("key/expression", "value") -/// .encoding(KnownEncoding::TextPlain) +/// .put("key/expression", "payload") +/// .with_encoding(Encoding::TEXT_PLAIN) /// .congestion_control(CongestionControl::Block) /// .res() /// .await @@ -79,22 +82,14 @@ pub type DeleteBuilder<'a, 'b> = PutBuilder<'a, 'b>; #[derive(Debug, Clone)] pub struct PutBuilder<'a, 'b> { pub(crate) publisher: PublisherBuilder<'a, 'b>, - pub(crate) value: Value, + pub(crate) payload: Payload, pub(crate) kind: SampleKind, + pub(crate) encoding: Encoding, #[cfg(feature = "unstable")] pub(crate) attachment: Option, } impl PutBuilder<'_, '_> { - /// Change the encoding of the written data. - #[inline] - pub fn encoding(mut self, encoding: IntoEncoding) -> Self - where - IntoEncoding: Into, - { - self.value.encoding = encoding.into(); - self - } /// Change the `congestion_control` to apply when routing the data. #[inline] pub fn congestion_control(mut self, congestion_control: CongestionControl) -> Self { @@ -118,12 +113,18 @@ impl PutBuilder<'_, '_> { self } - pub fn kind(mut self, kind: SampleKind) -> Self { - self.kind = kind; + /// Set the [`Encoding`] of the written data. + #[inline] + pub fn with_encoding(mut self, encoding: IntoEncoding) -> Self + where + IntoEncoding: Into, + { + self.encoding = encoding.into(); self } #[zenoh_macros::unstable] + /// Attach user-provided data to the written data. 
pub fn with_attachment(mut self, attachment: Attachment) -> Self { self.attachment = Some(attachment); self @@ -155,8 +156,9 @@ impl SyncResolve for PutBuilder<'_, '_> { resolve_put( &publisher, - self.value, + self.payload, self.kind, + self.encoding, #[cfg(feature = "unstable")] self.attachment, ) @@ -308,11 +310,12 @@ impl<'a> Publisher<'a> { std::sync::Arc::new(self) } - fn _write(&self, kind: SampleKind, value: Value) -> Publication { + fn _write(&self, kind: SampleKind, payload: Payload) -> Publication { Publication { publisher: self, - value, + payload, kind, + encoding: Encoding::ZENOH_BYTES, #[cfg(feature = "unstable")] attachment: None, } @@ -327,12 +330,12 @@ impl<'a> Publisher<'a> { /// /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); - /// publisher.write(SampleKind::Put, "value").res().await.unwrap(); + /// publisher.write(SampleKind::Put, "payload").res().await.unwrap(); /// # }) /// ``` - pub fn write(&self, kind: SampleKind, value: IntoValue) -> Publication + pub fn write(&self, kind: SampleKind, value: IntoPayload) -> Publication where - IntoValue: Into, + IntoPayload: Into, { self._write(kind, value.into()) } @@ -350,11 +353,11 @@ impl<'a> Publisher<'a> { /// # }) /// ``` #[inline] - pub fn put(&self, value: IntoValue) -> Publication + pub fn put(&self, payload: IntoPayload) -> Publication where - IntoValue: Into, + IntoPayload: Into, { - self._write(SampleKind::Put, value.into()) + self._write(SampleKind::Put, payload.into()) } /// Delete data. @@ -370,7 +373,7 @@ impl<'a> Publisher<'a> { /// # }) /// ``` pub fn delete(&self) -> Publication { - self._write(SampleKind::Delete, Value::empty()) + self._write(SampleKind::Delete, Payload::empty()) } /// Return the [`MatchingStatus`] of the publisher. 
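The publication changes above replace the single `Value` argument with a `Payload` plus an explicit `Encoding` set through `with_encoding()`. A usage sketch in the style of the doc examples in this patch; the key expressions and payloads are placeholders and an async-std runtime is assumed:

    async_std::task::block_on(async {
        use zenoh::prelude::r#async::*;

        let session = zenoh::open(config::peer()).res().await.unwrap();

        // put() now takes a payload; the encoding is set separately via with_encoding().
        session
            .put("key/expression", "payload")
            .with_encoding(Encoding::TEXT_PLAIN)
            .congestion_control(CongestionControl::Block)
            .res()
            .await
            .unwrap();

        // Publishers write Payloads as well; delete() needs no payload at all.
        let publisher = session.declare_publisher("key/expression").res().await.unwrap();
        publisher.put("payload").res().await.unwrap();
        publisher.delete().res().await.unwrap();
    });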
@@ -597,13 +600,19 @@ impl Drop for Publisher<'_> { #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] pub struct Publication<'a> { publisher: &'a Publisher<'a>, - value: Value, + payload: Payload, kind: SampleKind, + encoding: Encoding, #[cfg(feature = "unstable")] pub(crate) attachment: Option, } impl<'a> Publication<'a> { + pub fn with_encoding(mut self, encoding: Encoding) -> Self { + self.encoding = encoding; + self + } + #[zenoh_macros::unstable] pub fn with_attachment(mut self, attachment: Attachment) -> Self { self.attachment = Some(attachment); @@ -619,8 +628,9 @@ impl SyncResolve for Publication<'_> { fn res_sync(self) -> ::To { resolve_put( self.publisher, - self.value, + self.payload, self.kind, + self.encoding, #[cfg(feature = "unstable")] self.attachment, ) @@ -635,10 +645,7 @@ impl AsyncResolve for Publication<'_> { } } -impl<'a, IntoValue> Sink for Publisher<'a> -where - IntoValue: Into, -{ +impl<'a> Sink for Publisher<'a> { type Error = Error; #[inline] @@ -647,8 +654,16 @@ where } #[inline] - fn start_send(self: Pin<&mut Self>, item: IntoValue) -> Result<(), Self::Error> { - self.put(item.into()).res_sync() + fn start_send(self: Pin<&mut Self>, item: Sample) -> Result<(), Self::Error> { + Publication { + publisher: &self, + payload: item.payload, + kind: item.kind, + encoding: item.encoding, + #[cfg(feature = "unstable")] + attachment: item.attachment, + } + .res_sync() } #[inline] @@ -791,8 +806,9 @@ impl<'a, 'b> AsyncResolve for PublisherBuilder<'a, 'b> { fn resolve_put( publisher: &Publisher<'_>, - value: Value, + payload: Payload, kind: SampleKind, + encoding: Encoding, #[cfg(feature = "unstable")] attachment: Option, ) -> ZResult<()> { log::trace!("write({:?}, [...])", &publisher.key_expr); @@ -825,13 +841,13 @@ fn resolve_put( } PushBody::Put(Put { timestamp, - encoding: value.encoding.clone(), + encoding: encoding.clone().into(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, ext_attachment, ext_unknown: vec![], - payload: value.payload.clone(), + payload: payload.clone().into(), }) } SampleKind::Delete => { @@ -856,7 +872,7 @@ fn resolve_put( if publisher.destination != Locality::Remote { let data_info = DataInfo { kind, - encoding: Some(value.encoding), + encoding: Some(encoding), timestamp, source_id: None, source_sn: None, @@ -871,7 +887,7 @@ fn resolve_put( true, &publisher.key_expr.to_wire(&publisher.session), Some(data_info), - value.payload, + payload.into(), #[cfg(feature = "unstable")] attachment, ); @@ -1366,7 +1382,7 @@ mod tests { let sample = sub.recv().unwrap(); assert_eq!(sample.kind, kind); - assert_eq!(sample.value.to_string(), VALUE); + assert_eq!(sample.payload.deserialize::().unwrap(), VALUE); } sample_kind_integrity_in_publication_with(SampleKind::Put); @@ -1392,7 +1408,7 @@ mod tests { assert_eq!(sample.kind, kind); if let SampleKind::Put = kind { - assert_eq!(sample.value.to_string(), VALUE); + assert_eq!(sample.payload.deserialize::().unwrap(), VALUE); } } diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index b278bcfa26..6bd78d4fc7 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -17,14 +17,12 @@ use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; -#[zenoh_macros::unstable] -use crate::query::ReplyKeyExpr; -#[zenoh_macros::unstable] -use crate::sample::Attachment; use crate::sample::DataInfo; +use crate::Id; use crate::SessionRef; use crate::Undeclarable; 
- +#[cfg(feature = "unstable")] +use crate::{query::ReplyKeyExpr, sample::Attachment}; use std::fmt; use std::future::Ready; use std::ops::Deref; @@ -190,8 +188,9 @@ impl SyncResolve for ReplyBuilder<'_> { } let Sample { key_expr, - value: Value { payload, encoding }, + payload, kind, + encoding, timestamp, qos, #[cfg(feature = "unstable")] @@ -251,13 +250,13 @@ impl SyncResolve for ReplyBuilder<'_> { payload: match kind { SampleKind::Put => ReplyBody::Put(Put { timestamp: data_info.timestamp, - encoding: data_info.encoding.unwrap_or_default(), + encoding: data_info.encoding.unwrap_or_default().into(), ext_sinfo, #[cfg(feature = "shared-memory")] ext_shm: None, ext_attachment: ext_attachment!(), ext_unknown: vec![], - payload, + payload: payload.into(), }), SampleKind::Delete => ReplyBody::Del(Del { timestamp, @@ -292,8 +291,8 @@ impl SyncResolve for ReplyBuilder<'_> { ext_body: Some(ValueType { #[cfg(feature = "shared-memory")] ext_shm: None, - payload: payload.payload, - encoding: payload.encoding, + payload: payload.payload.into(), + encoding: payload.encoding.into(), }), code: 0, // TODO }), diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 89b787fef5..543dd62e84 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -13,19 +13,19 @@ // //! Sample primitives -use crate::buffers::ZBuf; -use crate::prelude::{KeyExpr, Value, ZenohId}; -use crate::query::Reply; +use crate::encoding::Encoding; +use crate::payload::Payload; +use crate::prelude::{KeyExpr, ZenohId}; use crate::time::{new_reception_timestamp, Timestamp}; use crate::Priority; +use crate::Value; #[zenoh_macros::unstable] use serde::Serialize; use std::{ convert::{TryFrom, TryInto}, fmt, }; -use zenoh_protocol::core::{CongestionControl, Encoding}; -use zenoh_protocol::network::push::ext::QoSType; +use zenoh_protocol::{core::CongestionControl, network::push::ext::QoSType}; pub type SourceSn = u64; @@ -357,10 +357,12 @@ pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; pub struct Sample { /// The key expression on which this Sample was published. pub key_expr: KeyExpr<'static>, - /// The value of this Sample. - pub value: Value, + /// The payload of this Sample. + pub payload: Payload, /// The kind of this Sample. pub kind: SampleKind, + /// The encoding of this sample + pub encoding: Encoding, /// The [`Timestamp`] of this Sample. pub timestamp: Option, /// Quality of service settings this sample was sent with. @@ -390,14 +392,15 @@ pub struct Sample { impl Sample { /// Creates a new Sample. #[inline] - pub fn new(key_expr: IntoKeyExpr, value: IntoValue) -> Self + pub fn new(key_expr: IntoKeyExpr, payload: IntoPayload) -> Self where IntoKeyExpr: Into>, - IntoValue: Into, + IntoPayload: Into, { Sample { key_expr: key_expr.into(), - value: value.into(), + payload: payload.into(), + encoding: Encoding::default(), kind: SampleKind::default(), timestamp: None, qos: QoS::default(), @@ -409,18 +412,19 @@ impl Sample { } /// Creates a new Sample. #[inline] - pub fn try_from( + pub fn try_from( key_expr: TryIntoKeyExpr, - value: IntoValue, + payload: IntoPayload, ) -> Result where TryIntoKeyExpr: TryInto>, >>::Error: Into, - IntoValue: Into, + IntoPayload: Into, { Ok(Sample { key_expr: key_expr.try_into().map_err(Into::into)?, - value: value.into(), + payload: payload.into(), + encoding: Encoding::default(), kind: SampleKind::default(), timestamp: None, qos: QoS::default(), @@ -433,40 +437,30 @@ impl Sample { /// Creates a new Sample with optional data info. 
#[inline] - pub(crate) fn with_info( - key_expr: KeyExpr<'static>, - payload: ZBuf, - data_info: Option, - ) -> Self { - let mut value: Value = payload.into(); - if let Some(data_info) = data_info { - if let Some(encoding) = &data_info.encoding { - value.encoding = encoding.clone(); + pub(crate) fn with_info(mut self, mut data_info: Option) -> Self { + if let Some(mut data_info) = data_info.take() { + self.kind = data_info.kind; + if let Some(encoding) = data_info.encoding.take() { + self.encoding = encoding; } - Sample { - key_expr, - value, - kind: data_info.kind, - timestamp: data_info.timestamp, - qos: data_info.qos, - #[cfg(feature = "unstable")] - source_info: data_info.into(), - #[cfg(feature = "unstable")] - attachment: None, - } - } else { - Sample { - key_expr, - value, - kind: SampleKind::default(), - timestamp: None, - qos: QoS::default(), - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment: None, + self.qos = data_info.qos; + self.timestamp = data_info.timestamp; + #[cfg(feature = "unstable")] + { + self.source_info = SourceInfo { + source_id: data_info.source_id, + source_sn: data_info.source_sn, + }; } } + self + } + + /// Sets the encoding of this Sample. + #[inline] + pub fn with_encoding(mut self, encoding: Encoding) -> Self { + self.encoding = encoding; + self } /// Gets the timestamp of this Sample. @@ -522,34 +516,9 @@ impl Sample { } } -impl std::ops::Deref for Sample { - type Target = Value; - - fn deref(&self) -> &Self::Target { - &self.value - } -} - -impl std::ops::DerefMut for Sample { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.value - } -} - -impl std::fmt::Display for Sample { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self.kind { - SampleKind::Delete => write!(f, "{}({})", self.kind, self.key_expr), - _ => write!(f, "{}({}: {})", self.kind, self.key_expr, self.value), - } - } -} - -impl TryFrom for Sample { - type Error = Value; - - fn try_from(value: Reply) -> Result { - value.sample +impl From for Value { + fn from(sample: Sample) -> Self { + Value::new(sample.payload).with_encoding(sample.encoding) } } diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 9ab0242f16..87c416c209 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -11,10 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // - use crate::admin; use crate::config::Config; use crate::config::Notifier; +use crate::encoding::Encoding; use crate::handlers::{Callback, DefaultHandler}; use crate::info::*; use crate::key_expr::KeyExprInner; @@ -23,6 +23,7 @@ use crate::liveliness::{Liveliness, LivelinessTokenState}; use crate::net::primitives::Primitives; use crate::net::routing::dispatcher::face::Face; use crate::net::runtime::Runtime; +use crate::payload::Payload; use crate::prelude::Locality; use crate::prelude::{KeyExpr, Parameters}; use crate::publication::*; @@ -670,7 +671,7 @@ impl Session { /// # Arguments /// /// * `key_expr` - Key expression matching the resources to put - /// * `value` - The value to put + /// * `payload` - The payload to put /// /// # Examples /// ``` @@ -679,28 +680,29 @@ impl Session { /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// session - /// .put("key/expression", "value") - /// .encoding(KnownEncoding::TextPlain) + /// .put("key/expression", "payload") + /// .with_encoding(Encoding::TEXT_PLAIN) /// .res() /// .await /// .unwrap(); /// # }) /// ``` #[inline] - pub fn put<'a, 'b: 'a, TryIntoKeyExpr, 
IntoValue>( + pub fn put<'a, 'b: 'a, TryIntoKeyExpr, IntoPayload>( &'a self, key_expr: TryIntoKeyExpr, - value: IntoValue, + payload: IntoPayload, ) -> PutBuilder<'a, 'b> where TryIntoKeyExpr: TryInto>, >>::Error: Into, - IntoValue: Into, + IntoPayload: Into, { PutBuilder { publisher: self.declare_publisher(key_expr), - value: value.into(), + payload: payload.into(), kind: SampleKind::Put, + encoding: Encoding::default(), #[cfg(feature = "unstable")] attachment: None, } @@ -732,8 +734,9 @@ impl Session { { PutBuilder { publisher: self.declare_publisher(key_expr), - value: Value::empty(), + payload: Payload::empty(), kind: SampleKind::Delete, + encoding: Encoding::default(), #[cfg(feature = "unstable")] attachment: None, } @@ -1669,7 +1672,7 @@ impl Session { let zenoh_collections::single_or_vec::IntoIter { drain, last } = callbacks.into_iter(); for (cb, key_expr) in drain { #[allow(unused_mut)] - let mut sample = Sample::with_info(key_expr, payload.clone(), info.clone()); + let mut sample = Sample::new(key_expr, payload.clone()).with_info(info.clone()); #[cfg(feature = "unstable")] { sample.attachment = attachment.clone(); @@ -1678,7 +1681,7 @@ impl Session { } if let Some((cb, key_expr)) = last { #[allow(unused_mut)] - let mut sample = Sample::with_info(key_expr, payload, info); + let mut sample = Sample::new(key_expr, payload).with_info(info); #[cfg(feature = "unstable")] { sample.attachment = attachment; @@ -1785,8 +1788,8 @@ impl Session { ); let primitives = state.primitives.as_ref().unwrap().clone(); - drop(state); + if destination != Locality::SessionLocal { #[allow(unused_mut)] let mut ext_attachment = None; @@ -1812,8 +1815,8 @@ impl Session { ext_body: value.as_ref().map(|v| query::ext::QueryBodyType { #[cfg(feature = "shared-memory")] ext_shm: None, - encoding: v.encoding.clone(), - payload: v.payload.clone(), + encoding: v.encoding.clone().into(), + payload: v.payload.clone().into(), }), ext_attachment, ext_unknown: vec![], @@ -1831,8 +1834,8 @@ impl Session { value.as_ref().map(|v| query::ext::QueryBodyType { #[cfg(feature = "shared-memory")] ext_shm: None, - encoding: v.encoding.clone(), - payload: v.payload.clone(), + encoding: v.encoding.clone().into(), + payload: v.payload.clone().into(), }), #[cfg(feature = "unstable")] attachment, @@ -1902,8 +1905,8 @@ impl Session { key_expr, parameters, value: body.map(|b| Value { - payload: b.payload, - encoding: b.encoding, + payload: b.payload.into(), + encoding: b.encoding.into(), }), qid, zid, @@ -2188,7 +2191,7 @@ impl Primitives for Session { PushBody::Put(m) => { let info = DataInfo { kind: SampleKind::Put, - encoding: Some(m.encoding), + encoding: Some(m.encoding.into()), timestamp: m.timestamp, qos: QoS::from(msg.ext_qos), source_id: m.ext_sinfo.as_ref().map(|i| i.zid), @@ -2260,12 +2263,12 @@ impl Primitives for Session { std::mem::drop(state); let value = match e.ext_body { Some(body) => Value { - payload: body.payload, - encoding: body.encoding, + payload: body.payload.into(), + encoding: body.encoding.into(), }, None => Value { - payload: ZBuf::empty(), - encoding: zenoh_protocol::core::Encoding::EMPTY, + payload: Payload::empty(), + encoding: Encoding::default(), }, }; let replier_id = match e.ext_sinfo { @@ -2360,7 +2363,7 @@ impl Primitives for Session { payload, info: DataInfo { kind: SampleKind::Put, - encoding: Some(encoding), + encoding: Some(encoding.into()), timestamp, qos: QoS::from(msg.ext_qos), source_id: ext_sinfo.as_ref().map(|i| i.zid), @@ -2391,7 +2394,7 @@ impl Primitives for Session { 
#[allow(unused_mut)] let mut sample = - Sample::with_info(key_expr.into_owned(), payload, Some(info)); + Sample::new(key_expr.into_owned(), payload).with_info(Some(info)); #[cfg(feature = "unstable")] { sample.attachment = attachment; diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index e0123ec6b1..c707218017 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -13,9 +13,11 @@ // //! Subscribing primitives. -use crate::handlers::{locked, Callback, DefaultHandler}; +use crate::handlers::{locked, Callback, DefaultHandler, IntoCallbackReceiverPair}; +use crate::key_expr::KeyExpr; use crate::prelude::Locality; -use crate::prelude::{Id, IntoCallbackReceiverPair, KeyExpr, Sample}; +use crate::sample::Sample; +use crate::Id; use crate::Undeclarable; use crate::{Result as ZResult, SessionRef}; use std::fmt; @@ -62,7 +64,7 @@ impl fmt::Debug for SubscriberState { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") -/// .callback(|sample| { println!("Received: {} {}", sample.key_expr, sample.value); }) +/// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr, sample.payload) }) /// .res() /// .await /// .unwrap(); @@ -95,7 +97,7 @@ pub(crate) struct SubscriberInner<'a> { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") -/// .callback(|sample| { println!("Received: {} {}", sample.key_expr, sample.value); }) +/// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr, sample.payload); }) /// .pull_mode() /// .res() /// .await @@ -118,7 +120,7 @@ impl<'a> PullSubscriberInner<'a> { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") - /// .callback(|sample| { println!("Received: {} {}", sample.key_expr, sample.value); }) + /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr, sample.payload); }) /// .pull_mode() /// .res() /// .await @@ -327,7 +329,7 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") - /// .callback(|sample| { println!("Received: {} {}", sample.key_expr, sample.value); }) + /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr, sample.payload); }) /// .res() /// .await /// .unwrap(); @@ -402,7 +404,7 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { - /// println!("Received: {} {}", sample.key_expr, sample.value); + /// println!("Received: {} {:?}", sample.key_expr, sample.payload); /// } /// # }) /// ``` @@ -631,7 +633,7 @@ where /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { -/// println!("Received: {} {}", sample.key_expr, sample.value); +/// println!("Received: {} {:?}", sample.key_expr, sample.payload); /// } /// # }) /// ``` diff --git a/zenoh/src/value.rs b/zenoh/src/value.rs index 849cfd57d5..128f0ff605 100644 --- a/zenoh/src/value.rs +++ b/zenoh/src/value.rs @@ -13,693 +13,57 @@ // //! Value primitives. 
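On the subscriber side, `Sample` now exposes `payload` and `encoding` directly instead of a `Value`, so receivers deserialize the payload themselves. A sketch following the subscriber doc examples above; the key expression is a placeholder and the fallback to the `Debug` form is an illustrative choice, not part of the patch:

    async_std::task::block_on(async {
        use zenoh::prelude::r#async::*;

        let session = zenoh::open(config::peer()).res().await.unwrap();
        let subscriber = session
            .declare_subscriber("key/expression")
            .res()
            .await
            .unwrap();
        while let Ok(sample) = subscriber.recv_async().await {
            // sample.payload is a Payload; deserialize it explicitly (here as UTF-8 text).
            match sample.payload.deserialize::<String>() {
                Ok(text) => println!("Received: {} {}", sample.key_expr, text),
                Err(_) => println!("Received: {} {:?}", sample.key_expr, sample.payload),
            }
        }
    });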
+use crate::{encoding::Encoding, payload::Payload}; -use base64::{engine::general_purpose::STANDARD as b64_std_engine, Engine}; -use std::borrow::Cow; -use std::convert::TryFrom; -#[cfg(feature = "shared-memory")] -use std::sync::Arc; - -use zenoh_collections::Properties; -use zenoh_result::ZError; - -use crate::buffers::ZBuf; -use crate::prelude::{Encoding, KnownEncoding, Sample, SplitBuffer}; -#[cfg(feature = "shared-memory")] -use zenoh_shm::SharedMemoryBuf; - -/// A zenoh Value. +/// A zenoh [`Value`] contains a `payload` and an [`Encoding`] that indicates how the [`Payload`] should be interpreted. #[non_exhaustive] -#[derive(Clone)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct Value { - /// The payload of this Value. - pub payload: ZBuf, - /// An encoding description indicating how the associated payload is encoded. + /// The binary [`Payload`] of this [`Value`]. + pub payload: Payload, + /// The [`Encoding`] of this [`Value`]. pub encoding: Encoding, } impl Value { - /// Creates a new zenoh Value. - pub fn new(payload: ZBuf) -> Self { + /// Creates a new [`Value`] with default [`Encoding`]. + pub fn new(payload: T) -> Self + where + T: Into, + { Value { - payload, - encoding: KnownEncoding::AppOctetStream.into(), + payload: payload.into(), + encoding: Encoding::default(), } } - /// Creates an empty Value. - pub fn empty() -> Self { + /// Creates an empty [`Value`]. + pub const fn empty() -> Self { Value { - payload: ZBuf::empty(), - encoding: KnownEncoding::AppOctetStream.into(), + payload: Payload::empty(), + encoding: Encoding::default(), } } - /// Sets the encoding of this zenoh Value. + /// Sets the encoding of this [`Value`]`. #[inline(always)] - pub fn encoding(mut self, encoding: Encoding) -> Self { - self.encoding = encoding; + pub fn with_encoding(mut self, encoding: IntoEncoding) -> Self + where + IntoEncoding: Into, + { + self.encoding = encoding.into(); self } } -impl std::fmt::Debug for Value { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!( - f, - "Value{{ payload: {:?}, encoding: {} }}", - self.payload, self.encoding - ) - } -} - -impl std::fmt::Display for Value { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let payload = self.payload.contiguous(); - write!( - f, - "{}", - String::from_utf8(payload.clone().into_owned()) - .unwrap_or_else(|_| b64_std_engine.encode(payload)) - ) - } -} - -impl std::error::Error for Value {} - -// Shared memory conversion -#[cfg(feature = "shared-memory")] -impl From> for Value { - fn from(smb: Arc) -> Self { - Value { - payload: smb.into(), - encoding: KnownEncoding::AppOctetStream.into(), - } - } -} - -#[cfg(feature = "shared-memory")] -impl From> for Value { - fn from(smb: Box) -> Self { - let smb: Arc = smb.into(); - Self::from(smb) - } -} - -#[cfg(feature = "shared-memory")] -impl From for Value { - fn from(smb: SharedMemoryBuf) -> Self { +impl From for Value +where + T: Into, +{ + fn from(t: T) -> Self { Value { - payload: smb.into(), - encoding: KnownEncoding::AppOctetStream.into(), + payload: t.into(), + encoding: Encoding::default(), } } } - -// Bytes conversion -impl From for Value { - fn from(buf: ZBuf) -> Self { - Value { - payload: buf, - encoding: KnownEncoding::AppOctetStream.into(), - } - } -} - -impl TryFrom<&Value> for ZBuf { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppOctetStream => Ok(v.payload.clone()), - unexpected => Err(zerror!( - "{:?} can not be converted into Cow<'a, [u8]>", - 
unexpected - )), - } - } -} - -impl TryFrom for ZBuf { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -impl From<&[u8]> for Value { - fn from(buf: &[u8]) -> Self { - Value::from(ZBuf::from(buf.to_vec())) - } -} - -impl<'a> TryFrom<&'a Value> for Cow<'a, [u8]> { - type Error = ZError; - - fn try_from(v: &'a Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppOctetStream => Ok(v.payload.contiguous()), - unexpected => Err(zerror!( - "{:?} can not be converted into Cow<'a, [u8]>", - unexpected - )), - } - } -} - -impl From> for Value { - fn from(buf: Vec) -> Self { - Value::from(ZBuf::from(buf)) - } -} - -impl TryFrom<&Value> for Vec { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppOctetStream => Ok(v.payload.contiguous().to_vec()), - unexpected => Err(zerror!( - "{:?} can not be converted into Vec", - unexpected - )), - } - } -} - -impl TryFrom for Vec { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// String conversion -impl From for Value { - fn from(s: String) -> Self { - Value { - payload: ZBuf::from(s.into_bytes()), - encoding: KnownEncoding::TextPlain.into(), - } - } -} - -impl From<&str> for Value { - fn from(s: &str) -> Self { - Value { - payload: ZBuf::from(Vec::::from(s)), - encoding: KnownEncoding::TextPlain.into(), - } - } -} - -impl TryFrom<&Value> for String { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::TextPlain => { - String::from_utf8(v.payload.contiguous().to_vec()).map_err(|e| zerror!("{}", e)) - } - unexpected => Err(zerror!("{:?} can not be converted into String", unexpected)), - } - } -} - -impl TryFrom for String { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// Sample conversion -impl From for Value { - fn from(s: Sample) -> Self { - s.value - } -} - -// i64 conversion -impl From for Value { - fn from(i: i64) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for i64 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? - .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into i64", unexpected)), - } - } -} - -impl TryFrom for i64 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// i32 conversion -impl From for Value { - fn from(i: i32) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for i32 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? 
- .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into i32", unexpected)), - } - } -} - -impl TryFrom for i32 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// i16 conversion -impl From for Value { - fn from(i: i16) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for i16 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? - .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into i16", unexpected)), - } - } -} - -impl TryFrom for i16 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// i8 conversion -impl From for Value { - fn from(i: i8) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for i8 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? - .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into i8", unexpected)), - } - } -} - -impl TryFrom for i8 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// isize conversion -impl From for Value { - fn from(i: isize) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for isize { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? - .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into isize", unexpected)), - } - } -} - -impl TryFrom for isize { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// u64 conversion -impl From for Value { - fn from(i: u64) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for u64 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? - .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into u64", unexpected)), - } - } -} - -impl TryFrom for u64 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// u32 conversion -impl From for Value { - fn from(i: u32) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for u32 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? 
- .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into u32", unexpected)), - } - } -} - -impl TryFrom for u32 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// u16 conversion -impl From for Value { - fn from(i: u16) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for u16 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? - .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into u16", unexpected)), - } - } -} - -impl TryFrom for u16 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// u8 conversion -impl From for Value { - fn from(i: u8) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for u8 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? - .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into u8", unexpected)), - } - } -} - -impl TryFrom for u8 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// usize conversion -impl From for Value { - fn from(i: usize) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for usize { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? - .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into usize", unexpected)), - } - } -} - -impl TryFrom for usize { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// f64 conversion -impl From for Value { - fn from(f: f64) -> Self { - Value { - payload: ZBuf::from(Vec::::from(f.to_string())), - encoding: KnownEncoding::AppFloat.into(), - } - } -} - -impl TryFrom<&Value> for f64 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppFloat => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? - .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into f64", unexpected)), - } - } -} - -impl TryFrom for f64 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// f32 conversion -impl From for Value { - fn from(f: f32) -> Self { - Value { - payload: ZBuf::from(Vec::::from(f.to_string())), - encoding: KnownEncoding::AppFloat.into(), - } - } -} - -impl TryFrom<&Value> for f32 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppFloat => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? 
- .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into f32", unexpected)), - } - } -} - -impl TryFrom for f32 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// JSON conversion -impl From<&serde_json::Value> for Value { - fn from(json: &serde_json::Value) -> Self { - Value { - payload: ZBuf::from(Vec::::from(json.to_string())), - encoding: KnownEncoding::AppJson.into(), - } - } -} - -impl From for Value { - fn from(json: serde_json::Value) -> Self { - Value::from(&json) - } -} - -impl TryFrom<&Value> for serde_json::Value { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppJson | KnownEncoding::TextJson => { - let r = serde::Deserialize::deserialize(&mut serde_json::Deserializer::from_slice( - &v.payload.contiguous(), - )); - r.map_err(|e| zerror!("{}", e)) - } - unexpected => Err(zerror!( - "{:?} can not be converted into Properties", - unexpected - )), - } - } -} - -impl TryFrom for serde_json::Value { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// Properties conversion -impl From for Value { - fn from(p: Properties) -> Self { - Value { - payload: ZBuf::from(Vec::::from(p.to_string())), - encoding: KnownEncoding::AppProperties.into(), - } - } -} - -impl TryFrom<&Value> for Properties { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match *v.encoding.prefix() { - KnownEncoding::AppProperties => Ok(Properties::from( - std::str::from_utf8(&v.payload.contiguous()).map_err(|e| zerror!("{}", e))?, - )), - unexpected => Err(zerror!( - "{:?} can not be converted into Properties", - unexpected - )), - } - } -} - -impl TryFrom for Properties { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index d1fbd1086a..89dd3e231f 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -75,7 +75,7 @@ fn queries() { query .reply(Ok(Sample::new( query.key_expr().clone(), - query.value().unwrap().clone(), + query.value().unwrap().payload.clone(), ) .with_attachment(attachment))) .res() diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 3b10f12f03..5c96f080f8 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -58,7 +58,7 @@ impl Task { let sub = ztimeout!(session.declare_subscriber(ke).res_async())?; let mut counter = 0; while let Ok(sample) = sub.recv_async().await { - let recv_size = sample.value.payload.len(); + let recv_size = sample.payload.len(); if recv_size != *expected_size { bail!("Received payload size {recv_size} mismatches the expected {expected_size}"); } @@ -75,7 +75,7 @@ impl Task { let value: Value = vec![0u8; *payload_size].into(); while remaining_checkpoints.load(Ordering::Relaxed) > 0 { ztimeout!(session - .put(ke, value.clone()) + .put(ke, value.payload.clone()) .congestion_control(CongestionControl::Block) .res_async())?; } @@ -91,7 +91,7 @@ impl Task { while let Ok(reply) = replies.recv_async().await { match reply.sample { Ok(sample) => { - let recv_size = sample.value.payload.len(); + let recv_size = sample.payload.len(); if recv_size != *expected_size { bail!("Received payload size {recv_size} mismatches the expected {expected_size}"); } @@ -99,7 +99,7 @@ impl Task { Err(err) => { log::warn!( - "Sample got from {} failed to unwrap! Error: {}.", + "Sample got from {} failed to unwrap! 
Error: {:?}.", ke, err ); diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index f727ad60c3..8a3f4381d2 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -95,7 +95,7 @@ async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Re let sub = ztimeout!(peer01 .declare_subscriber(key_expr) .callback(move |sample| { - assert_eq!(sample.value.payload.len(), size); + assert_eq!(sample.payload.len(), size); c_msgs.fetch_add(1, Ordering::Relaxed); }) .res_async()) @@ -194,7 +194,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re while let Ok(s) = ztimeout!(rs.recv_async()) { let s = s.sample.unwrap(); assert_eq!(s.kind, SampleKind::Put); - assert_eq!(s.value.payload.len(), size); + assert_eq!(s.payload.len(), size); cnt += 1; } } @@ -212,7 +212,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re while let Ok(s) = ztimeout!(rs.recv_async()) { let s = s.sample.unwrap(); assert_eq!(s.kind, SampleKind::Delete); - assert_eq!(s.value.payload.len(), 0); + assert_eq!(s.payload.len(), 0); cnt += 1; } } diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index b986c92e8f..76910ee5de 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -114,7 +114,7 @@ async fn test_unicity_pubsub(s01: &Session, s02: &Session, s03: &Session) { let sub1 = ztimeout!(s01 .declare_subscriber(key_expr) .callback(move |sample| { - assert_eq!(sample.value.payload.len(), size); + assert_eq!(sample.payload.len(), size); c_msgs1.fetch_add(1, Ordering::Relaxed); }) .res_async()) @@ -126,7 +126,7 @@ async fn test_unicity_pubsub(s01: &Session, s02: &Session, s03: &Session) { let sub2 = ztimeout!(s02 .declare_subscriber(key_expr) .callback(move |sample| { - assert_eq!(sample.value.payload.len(), size); + assert_eq!(sample.payload.len(), size); c_msgs2.fetch_add(1, Ordering::Relaxed); }) .res_async()) @@ -224,7 +224,7 @@ async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) { for _ in 0..msg_count { let rs = ztimeout!(s03.get(key_expr).res_async()).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { - assert_eq!(s.sample.unwrap().value.payload.len(), size); + assert_eq!(s.sample.unwrap().payload.len(), size); cnt += 1; } } From b11a20e11f3c2c7c66644417cb0ed08b852cb88a Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 12 Mar 2024 09:42:32 +0100 Subject: [PATCH 007/357] Fix protocol comment --- commons/zenoh-protocol/src/zenoh/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commons/zenoh-protocol/src/zenoh/mod.rs b/commons/zenoh-protocol/src/zenoh/mod.rs index 4c8458885b..1284116888 100644 --- a/commons/zenoh-protocol/src/zenoh/mod.rs +++ b/commons/zenoh-protocol/src/zenoh/mod.rs @@ -214,7 +214,7 @@ pub mod ext { /// +-+-+-+-+-+-+-+-+ /// ~ encoding ~ /// +---------------+ - /// ~ pl: ~ -- Payload + /// ~ pl: [u8;z32] ~ -- Payload /// +---------------+ /// ``` #[derive(Debug, Clone, PartialEq, Eq)] From fcbceb07ae9bd4160a294ab5c982b6882eca6a7b Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 12 Mar 2024 12:43:21 +0100 Subject: [PATCH 008/357] Improve Endpoint and Locator doc --- commons/zenoh-protocol/src/core/endpoint.rs | 7 ++++++- commons/zenoh-protocol/src/core/locator.rs | 6 +++--- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/commons/zenoh-protocol/src/core/endpoint.rs b/commons/zenoh-protocol/src/core/endpoint.rs index 5e921345e4..a8fcb3ae98 100644 --- a/commons/zenoh-protocol/src/core/endpoint.rs +++ 
b/commons/zenoh-protocol/src/core/endpoint.rs @@ -497,7 +497,12 @@ impl fmt::Debug for ConfigMut<'_> { } } -/// A `String` that respects the [`EndPoint`] canon form: `#`, such that `` is a valid [`Locator`] `` is of the form `=;...;=` where keys are alphabetically sorted. +/// A string that respects the [`EndPoint`] canon form: `[#]`. +/// +/// `` is a valid [`Locator`] and `` is of the form `=;...;=` where keys are alphabetically sorted. +/// `` is optional and can be provided to configure some aspectes for an [`EndPoint`], e.g. the interface to listen on or connect to. +/// +/// A full [`EndPoint`] string is hence in the form of `/
#[derive(Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] #[serde(into = "String")] #[serde(try_from = "String")] diff --git a/commons/zenoh-protocol/src/core/locator.rs b/commons/zenoh-protocol/src/core/locator.rs index 42379f2b65..50b909b12f 100644 --- a/commons/zenoh-protocol/src/core/locator.rs +++ b/commons/zenoh-protocol/src/core/locator.rs @@ -16,9 +16,9 @@ use alloc::{borrow::ToOwned, string::String}; use core::{convert::TryFrom, fmt, hash::Hash, str::FromStr}; use zenoh_result::{Error as ZError, ZResult}; -// Locator -/// A `String` that respects the [`Locator`] canon form: `<proto>/<address>
[?<metadata>]`, -/// such that `<metadata>` is of the form `<key1>=<value1>;...;<keyN>=<valueN>` where keys are alphabetically sorted. +/// A string that respects the [`Locator`] canon form: `<proto>/<address>
[?]`. +/// +/// `` is of the form `=;...;=` where keys are alphabetically sorted. #[derive(Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] #[serde(into = "String")] #[serde(try_from = "String")] From 41e25579f9f3c851f44c992946fd1d0c61fccfa9 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Tue, 12 Mar 2024 13:42:18 +0100 Subject: [PATCH 009/357] Protocol changes: EntityId (into protocol_changes) (#774) * New Subscribers EntityId behavior for clients and peers * Improve routing logging * New Queryables EntityId behavior for clients and peers * Improve routing logging * Use proper QueryableId in Session and AdminSpace * Sessions use runtime Id generator to avoid collisions * AdminSpace use runtime Id generator to avoid collisions * Use proper ResponderId * Define EntityId type * Add source_eid to SourceInfo * Update source_info_stack_size test * Update source_info_stack_size test * Introduce EntityGlobalId type * Add id() function to Subscriber, Queryable and Publisher * Add Publication::with_source_info() function * Code format * Remove ref to PR #703 * Fix doctests * Add comments * Remove comments --- commons/zenoh-codec/src/network/declare.rs | 13 +- commons/zenoh-codec/src/network/mod.rs | 22 +- commons/zenoh-codec/src/zenoh/mod.rs | 26 +- commons/zenoh-protocol/src/core/mod.rs | 21 + commons/zenoh-protocol/src/core/wire_expr.rs | 4 + commons/zenoh-protocol/src/network/declare.rs | 17 +- commons/zenoh-protocol/src/network/mod.rs | 10 +- .../zenoh-protocol/src/network/response.rs | 2 +- commons/zenoh-protocol/src/zenoh/mod.rs | 10 +- zenoh/src/lib.rs | 2 +- zenoh/src/net/routing/dispatcher/face.rs | 6 +- zenoh/src/net/routing/dispatcher/pubsub.rs | 115 +++-- zenoh/src/net/routing/dispatcher/queries.rs | 103 ++-- zenoh/src/net/routing/dispatcher/resource.rs | 14 +- zenoh/src/net/routing/hat/client/mod.rs | 26 +- zenoh/src/net/routing/hat/client/pubsub.rs | 107 ++-- zenoh/src/net/routing/hat/client/queries.rs | 113 +++-- .../src/net/routing/hat/linkstate_peer/mod.rs | 30 +- .../net/routing/hat/linkstate_peer/pubsub.rs | 136 ++--- .../net/routing/hat/linkstate_peer/queries.rs | 142 +++--- zenoh/src/net/routing/hat/mod.rs | 17 +- zenoh/src/net/routing/hat/p2p_peer/mod.rs | 29 +- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 106 ++-- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 113 +++-- zenoh/src/net/routing/hat/router/mod.rs | 31 +- zenoh/src/net/routing/hat/router/pubsub.rs | 213 ++++---- zenoh/src/net/routing/hat/router/queries.rs | 220 +++++---- zenoh/src/net/runtime/adminspace.rs | 8 +- zenoh/src/net/runtime/mod.rs | 8 + zenoh/src/net/tests/tables.rs | 88 +++- zenoh/src/prelude.rs | 5 + zenoh/src/publication.rs | 83 +++- zenoh/src/queryable.rs | 73 +-- zenoh/src/sample.rs | 20 +- zenoh/src/session.rs | 467 +++++++----------- zenoh/src/subscriber.rs | 26 + 36 files changed, 1349 insertions(+), 1077 deletions(-) diff --git a/commons/zenoh-codec/src/network/declare.rs b/commons/zenoh-codec/src/network/declare.rs index 6df25a8d2a..bcc55ed62b 100644 --- a/commons/zenoh-codec/src/network/declare.rs +++ b/commons/zenoh-codec/src/network/declare.rs @@ -441,14 +441,19 @@ where let subscriber::UndeclareSubscriber { id, ext_wire_expr } = x; // Header - let header = declare::id::U_SUBSCRIBER | subscriber::flag::Z; + let mut header = declare::id::U_SUBSCRIBER; + if !ext_wire_expr.is_null() { + header |= subscriber::flag::Z; + } self.write(&mut *writer, header)?; // Body self.write(&mut *writer, id)?; // Extension - self.write(&mut *writer, (ext_wire_expr, false))?; + if 
!ext_wire_expr.is_null() { + self.write(&mut *writer, (ext_wire_expr, false))?; + } Ok(()) } @@ -483,7 +488,6 @@ where let id: subscriber::SubscriberId = self.codec.read(&mut *reader)?; // Extensions - // WARNING: this is a temporary and mandatory extension used for undeclarations let mut ext_wire_expr = common::ext::WireExprType::null(); let mut has_ext = imsg::has_flag(self.header, subscriber::flag::Z); @@ -665,7 +669,6 @@ where let id: queryable::QueryableId = self.codec.read(&mut *reader)?; // Extensions - // WARNING: this is a temporary and mandatory extension used for undeclarations let mut ext_wire_expr = common::ext::WireExprType::null(); let mut has_ext = imsg::has_flag(self.header, queryable::flag::Z); @@ -813,7 +816,6 @@ where let id: token::TokenId = self.codec.read(&mut *reader)?; // Extensions - // WARNING: this is a temporary and mandatory extension used for undeclarations let mut ext_wire_expr = common::ext::WireExprType::null(); let mut has_ext = imsg::has_flag(self.header, interest::flag::Z); @@ -1032,7 +1034,6 @@ where let id: interest::InterestId = self.codec.read(&mut *reader)?; // Extensions - // WARNING: this is a temporary and mandatory extension used for undeclarations let mut ext_wire_expr = common::ext::WireExprType::null(); let mut has_ext = imsg::has_flag(self.header, interest::flag::Z); diff --git a/commons/zenoh-codec/src/network/mod.rs b/commons/zenoh-codec/src/network/mod.rs index dade13d362..3a227cd42a 100644 --- a/commons/zenoh-codec/src/network/mod.rs +++ b/commons/zenoh-codec/src/network/mod.rs @@ -26,8 +26,8 @@ use zenoh_buffers::{ }; use zenoh_protocol::{ common::{imsg, ZExtZ64, ZExtZBufHeader}, - core::{Reliability, ZenohId}, - network::{ext::EntityIdType, *}, + core::{EntityId, Reliability, ZenohId}, + network::{ext::EntityGlobalIdType, *}, }; // NetworkMessage @@ -218,21 +218,21 @@ where } // Extension: EntityId -impl LCodec<&ext::EntityIdType<{ ID }>> for Zenoh080 { - fn w_len(self, x: &ext::EntityIdType<{ ID }>) -> usize { - let EntityIdType { zid, eid } = x; +impl LCodec<&ext::EntityGlobalIdType<{ ID }>> for Zenoh080 { + fn w_len(self, x: &ext::EntityGlobalIdType<{ ID }>) -> usize { + let EntityGlobalIdType { zid, eid } = x; 1 + self.w_len(zid) + self.w_len(*eid) } } -impl WCodec<(&ext::EntityIdType<{ ID }>, bool), &mut W> for Zenoh080 +impl WCodec<(&ext::EntityGlobalIdType<{ ID }>, bool), &mut W> for Zenoh080 where W: Writer, { type Output = Result<(), DidntWrite>; - fn write(self, writer: &mut W, x: (&ext::EntityIdType<{ ID }>, bool)) -> Self::Output { + fn write(self, writer: &mut W, x: (&ext::EntityGlobalIdType<{ ID }>, bool)) -> Self::Output { let (x, more) = x; let header: ZExtZBufHeader<{ ID }> = ZExtZBufHeader::new(self.w_len(x)); self.write(&mut *writer, (&header, more))?; @@ -248,13 +248,13 @@ where } } -impl RCodec<(ext::EntityIdType<{ ID }>, bool), &mut R> for Zenoh080Header +impl RCodec<(ext::EntityGlobalIdType<{ ID }>, bool), &mut R> for Zenoh080Header where R: Reader, { type Error = DidntRead; - fn read(self, reader: &mut R) -> Result<(ext::EntityIdType<{ ID }>, bool), Self::Error> { + fn read(self, reader: &mut R) -> Result<(ext::EntityGlobalIdType<{ ID }>, bool), Self::Error> { let (_, more): (ZExtZBufHeader<{ ID }>, bool) = self.read(&mut *reader)?; let flags: u8 = self.codec.read(&mut *reader)?; @@ -263,8 +263,8 @@ where let lodec = Zenoh080Length::new(length); let zid: ZenohId = lodec.read(&mut *reader)?; - let eid: u32 = self.codec.read(&mut *reader)?; + let eid: EntityId = self.codec.read(&mut *reader)?; - 
Ok((ext::EntityIdType { zid, eid }, more)) + Ok((ext::EntityGlobalIdType { zid, eid }, more)) } } diff --git a/commons/zenoh-codec/src/zenoh/mod.rs b/commons/zenoh-codec/src/zenoh/mod.rs index fdff09be94..0d7146dc90 100644 --- a/commons/zenoh-codec/src/zenoh/mod.rs +++ b/commons/zenoh-codec/src/zenoh/mod.rs @@ -32,7 +32,7 @@ use zenoh_buffers::{ use zenoh_protocol::common::{iext, ZExtUnit}; use zenoh_protocol::{ common::{imsg, ZExtZBufHeader}, - core::{Encoding, ZenohId}, + core::{Encoding, EntityGlobalId, EntityId, ZenohId}, zenoh::{ext, id, PushBody, RequestBody, ResponseBody}, }; @@ -150,9 +150,9 @@ where // Extension: SourceInfo impl LCodec<&ext::SourceInfoType<{ ID }>> for Zenoh080 { fn w_len(self, x: &ext::SourceInfoType<{ ID }>) -> usize { - let ext::SourceInfoType { zid, eid, sn } = x; + let ext::SourceInfoType { id, sn } = x; - 1 + self.w_len(zid) + self.w_len(*eid) + self.w_len(*sn) + 1 + self.w_len(&id.zid) + self.w_len(id.eid) + self.w_len(*sn) } } @@ -164,18 +164,18 @@ where fn write(self, writer: &mut W, x: (&ext::SourceInfoType<{ ID }>, bool)) -> Self::Output { let (x, more) = x; - let ext::SourceInfoType { zid, eid, sn } = x; + let ext::SourceInfoType { id, sn } = x; let header: ZExtZBufHeader<{ ID }> = ZExtZBufHeader::new(self.w_len(x)); self.write(&mut *writer, (&header, more))?; - let flags: u8 = (zid.size() as u8 - 1) << 4; + let flags: u8 = (id.zid.size() as u8 - 1) << 4; self.write(&mut *writer, flags)?; - let lodec = Zenoh080Length::new(zid.size()); - lodec.write(&mut *writer, zid)?; + let lodec = Zenoh080Length::new(id.zid.size()); + lodec.write(&mut *writer, &id.zid)?; - self.write(&mut *writer, eid)?; + self.write(&mut *writer, id.eid)?; self.write(&mut *writer, sn)?; Ok(()) } @@ -196,10 +196,16 @@ where let lodec = Zenoh080Length::new(length); let zid: ZenohId = lodec.read(&mut *reader)?; - let eid: u32 = self.codec.read(&mut *reader)?; + let eid: EntityId = self.codec.read(&mut *reader)?; let sn: u32 = self.codec.read(&mut *reader)?; - Ok((ext::SourceInfoType { zid, eid, sn }, more)) + Ok(( + ext::SourceInfoType { + id: EntityGlobalId { zid, eid }, + sn, + }, + more, + )) } } diff --git a/commons/zenoh-protocol/src/core/mod.rs b/commons/zenoh-protocol/src/core/mod.rs index 82658db2fd..20fcf85dd9 100644 --- a/commons/zenoh-protocol/src/core/mod.rs +++ b/commons/zenoh-protocol/src/core/mod.rs @@ -261,6 +261,27 @@ impl<'de> serde::Deserialize<'de> for ZenohId { } } +/// The unique id of a zenoh entity inside it's parent [`Session`]. +pub type EntityId = u32; + +/// The global unique id of a zenoh entity. 
+#[derive(Debug, Default, Clone, Eq, Hash, PartialEq)] +pub struct EntityGlobalId { + pub zid: ZenohId, + pub eid: EntityId, +} + +impl EntityGlobalId { + #[cfg(feature = "test")] + pub fn rand() -> Self { + use rand::Rng; + Self { + zid: ZenohId::rand(), + eid: rand::thread_rng().gen(), + } + } +} + #[repr(u8)] #[derive(Debug, Default, Copy, Clone, Eq, Hash, PartialEq)] pub enum Priority { diff --git a/commons/zenoh-protocol/src/core/wire_expr.rs b/commons/zenoh-protocol/src/core/wire_expr.rs index 6d9623d6ca..a66b1aa212 100644 --- a/commons/zenoh-protocol/src/core/wire_expr.rs +++ b/commons/zenoh-protocol/src/core/wire_expr.rs @@ -71,6 +71,10 @@ impl<'a> WireExpr<'a> { } } + pub fn is_empty(&self) -> bool { + self.scope == 0 && self.suffix.as_ref().is_empty() + } + pub fn as_str(&'a self) -> &'a str { if self.scope == 0 { self.suffix.as_ref() diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index 8164d9440d..2dd8de4ef8 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -177,7 +177,6 @@ pub mod common { pub mod ext { use super::*; - // WARNING: this is a temporary and mandatory extension used for undeclarations pub type WireExprExt = zextzbuf!(0x0f, true); #[derive(Debug, Clone, PartialEq, Eq)] pub struct WireExprType { @@ -195,6 +194,10 @@ pub mod common { } } + pub fn is_null(&self) -> bool { + self.wire_expr.is_empty() + } + #[cfg(feature = "test")] pub fn rand() -> Self { Self { @@ -286,9 +289,11 @@ pub mod keyexpr { } pub mod subscriber { + use crate::core::EntityId; + use super::*; - pub type SubscriberId = u32; + pub type SubscriberId = EntityId; pub mod flag { pub const N: u8 = 1 << 5; // 0x20 Named if N==1 then the key expr has name/suffix @@ -441,7 +446,6 @@ pub mod subscriber { #[derive(Debug, Clone, PartialEq, Eq)] pub struct UndeclareSubscriber { pub id: SubscriberId, - // WARNING: this is a temporary and mandatory extension used for undeclarations pub ext_wire_expr: common::ext::WireExprType, } @@ -460,9 +464,11 @@ pub mod subscriber { } pub mod queryable { + use crate::core::EntityId; + use super::*; - pub type QueryableId = u32; + pub type QueryableId = EntityId; pub mod flag { pub const N: u8 = 1 << 5; // 0x20 Named if N==1 then the key expr has name/suffix @@ -597,7 +603,6 @@ pub mod queryable { #[derive(Debug, Clone, PartialEq, Eq)] pub struct UndeclareQueryable { pub id: QueryableId, - // WARNING: this is a temporary and mandatory extension used for undeclarations pub ext_wire_expr: common::ext::WireExprType, } @@ -683,7 +688,6 @@ pub mod token { #[derive(Debug, Clone, PartialEq, Eq)] pub struct UndeclareToken { pub id: TokenId, - // WARNING: this is a temporary and mandatory extension used for undeclarations pub ext_wire_expr: common::ext::WireExprType, } @@ -1097,7 +1101,6 @@ pub mod interest { #[derive(Debug, Clone, PartialEq, Eq)] pub struct UndeclareInterest { pub id: InterestId, - // WARNING: this is a temporary and mandatory extension used for undeclarations pub ext_wire_expr: common::ext::WireExprType, } diff --git a/commons/zenoh-protocol/src/network/mod.rs b/commons/zenoh-protocol/src/network/mod.rs index bb76cb8946..6af7fef243 100644 --- a/commons/zenoh-protocol/src/network/mod.rs +++ b/commons/zenoh-protocol/src/network/mod.rs @@ -200,7 +200,7 @@ impl From for NetworkMessage { pub mod ext { use crate::{ common::{imsg, ZExtZ64}, - core::{CongestionControl, Priority, ZenohId}, + core::{CongestionControl, EntityId, Priority, ZenohId}, }; use 
core::fmt; @@ -407,19 +407,19 @@ pub mod ext { /// % eid % /// +---------------+ #[derive(Debug, Clone, PartialEq, Eq)] - pub struct EntityIdType { + pub struct EntityGlobalIdType { pub zid: ZenohId, - pub eid: u32, + pub eid: EntityId, } - impl EntityIdType<{ ID }> { + impl EntityGlobalIdType<{ ID }> { #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; let mut rng = rand::thread_rng(); let zid = ZenohId::rand(); - let eid: u32 = rng.gen(); + let eid: EntityId = rng.gen(); Self { zid, eid } } } diff --git a/commons/zenoh-protocol/src/network/response.rs b/commons/zenoh-protocol/src/network/response.rs index 9ef2c26a10..6f0925429b 100644 --- a/commons/zenoh-protocol/src/network/response.rs +++ b/commons/zenoh-protocol/src/network/response.rs @@ -67,7 +67,7 @@ pub mod ext { pub type TimestampType = crate::network::ext::TimestampType<{ Timestamp::ID }>; pub type ResponderId = zextzbuf!(0x3, false); - pub type ResponderIdType = crate::network::ext::EntityIdType<{ ResponderId::ID }>; + pub type ResponderIdType = crate::network::ext::EntityGlobalIdType<{ ResponderId::ID }>; } impl Response { diff --git a/commons/zenoh-protocol/src/zenoh/mod.rs b/commons/zenoh-protocol/src/zenoh/mod.rs index 1284116888..3e5d573c43 100644 --- a/commons/zenoh-protocol/src/zenoh/mod.rs +++ b/commons/zenoh-protocol/src/zenoh/mod.rs @@ -158,7 +158,7 @@ impl From for ResponseBody { pub mod ext { use zenoh_buffers::ZBuf; - use crate::core::{Encoding, ZenohId}; + use crate::core::{Encoding, EntityGlobalId}; /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ @@ -172,8 +172,7 @@ pub mod ext { /// +---------------+ #[derive(Debug, Clone, PartialEq, Eq)] pub struct SourceInfoType { - pub zid: ZenohId, - pub eid: u32, + pub id: EntityGlobalId, pub sn: u32, } @@ -183,10 +182,9 @@ pub mod ext { use rand::Rng; let mut rng = rand::thread_rng(); - let zid = ZenohId::rand(); - let eid: u32 = rng.gen(); + let id = EntityGlobalId::rand(); let sn: u32 = rng.gen(); - Self { zid, eid, sn } + Self { id, sn } } } diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index bae81d3a54..eb1ba1bcd1 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -79,7 +79,7 @@ extern crate zenoh_core; #[macro_use] extern crate zenoh_result; -pub(crate) type Id = usize; +pub(crate) type Id = u32; use git_version::git_version; use handlers::DefaultHandler; diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index 6ef5c063d0..79c9da9127 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -171,6 +171,7 @@ impl Primitives for Face { ctrl_lock.as_ref(), &self.tables, &mut self.state.clone(), + m.id, &m.wire_expr, &m.ext_info, msg.ext_nodeid.node_id, @@ -181,6 +182,7 @@ impl Primitives for Face { ctrl_lock.as_ref(), &self.tables, &mut self.state.clone(), + m.id, &m.ext_wire_expr.wire_expr, msg.ext_nodeid.node_id, ); @@ -190,6 +192,7 @@ impl Primitives for Face { ctrl_lock.as_ref(), &self.tables, &mut self.state.clone(), + m.id, &m.wire_expr, &m.ext_info, msg.ext_nodeid.node_id, @@ -200,6 +203,7 @@ impl Primitives for Face { ctrl_lock.as_ref(), &self.tables, &mut self.state.clone(), + m.id, &m.ext_wire_expr.wire_expr, msg.ext_nodeid.node_id, ); @@ -244,7 +248,7 @@ impl Primitives for Face { pull_data(&self.tables.tables, &self.state.clone(), msg.wire_expr); } _ => { - log::error!("Unsupported request"); + log::error!("{} Unsupported request!", self); } } } diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index 
d6497a80b3..c0d1bb4a34 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -22,7 +22,7 @@ use std::sync::RwLock; use zenoh_core::zread; use zenoh_protocol::core::key_expr::{keyexpr, OwnedKeyExpr}; use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; -use zenoh_protocol::network::declare::Mode; +use zenoh_protocol::network::declare::{Mode, SubscriberId}; use zenoh_protocol::{ core::{WhatAmI, WireExpr}, network::{declare::ext, Push}, @@ -34,17 +34,24 @@ pub(crate) fn declare_subscription( hat_code: &(dyn HatTrait + Send + Sync), tables: &TablesLock, face: &mut Arc, + id: SubscriberId, expr: &WireExpr, sub_info: &SubscriberInfo, node_id: NodeId, ) { - log::debug!("Declare subscription {}", face); let rtables = zread!(tables.tables); match rtables .get_mapping(face, &expr.scope, expr.mapping) .cloned() { Some(mut prefix) => { + log::debug!( + "{} Declare subscriber {} ({}{})", + face, + id, + prefix.expr(), + expr.suffix + ); let res = Resource::get_resource(&prefix, &expr.suffix); let (mut res, mut wtables) = if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { @@ -66,7 +73,7 @@ pub(crate) fn declare_subscription( (res, wtables) }; - hat_code.declare_subscription(&mut wtables, face, &mut res, sub_info, node_id); + hat_code.declare_subscription(&mut wtables, face, id, &mut res, sub_info, node_id); disable_matches_data_routes(&mut wtables, &mut res); drop(wtables); @@ -86,7 +93,12 @@ pub(crate) fn declare_subscription( } drop(wtables); } - None => log::error!("Declare subscription for unknown scope {}!", expr.scope), + None => log::error!( + "{} Declare subscriber {} for unknown scope {}!", + face, + id, + expr.scope + ), } } @@ -94,41 +106,60 @@ pub(crate) fn undeclare_subscription( hat_code: &(dyn HatTrait + Send + Sync), tables: &TablesLock, face: &mut Arc, + id: SubscriberId, expr: &WireExpr, node_id: NodeId, ) { - log::debug!("Undeclare subscription {}", face); - let rtables = zread!(tables.tables); - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - - hat_code.undeclare_subscription(&mut wtables, face, &mut res, node_id); - - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); + let res = if expr.is_empty() { + None + } else { + let rtables = zread!(tables.tables); + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(res) => Some(res), + None => { + log::error!( + "{} Undeclare unknown subscriber {}{}!", + face, + prefix.expr(), + expr.suffix + ); + return; } - Resource::clean(&mut res); - drop(wtables); + }, + None => { + log::error!( + "{} Undeclare subscriber with unknown scope {}", + face, + expr.scope + ); + return; } - None => log::error!("Undeclare unknown subscription!"), - }, - None => log::error!("Undeclare subscription with unknown scope!"), + } + }; + let mut wtables = zwrite!(tables.tables); + if let 
Some(mut res) = hat_code.undeclare_subscription(&mut wtables, face, id, res, node_id) { + log::debug!("{} Undeclare subscriber {} ({})", face, id, res.expr()); + disable_matches_data_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_data_routes = compute_matches_data_routes(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, data_routes, matching_pulls) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); + } + Resource::clean(&mut res); + drop(wtables); + } else { + log::error!("{} Undeclare unknown subscriber {}", face, id); } } @@ -445,7 +476,8 @@ pub fn full_reentrant_route_data( match tables.get_mapping(face, &expr.scope, expr.mapping).cloned() { Some(prefix) => { log::trace!( - "Route data for res {}{}", + "{} Route data for res {}{}", + face, prefix.expr(), expr.suffix.as_ref() ); @@ -561,7 +593,7 @@ pub fn full_reentrant_route_data( } } None => { - log::error!("Route data with unknown scope {}!", expr.scope); + log::error!("{} Route data with unknown scope {}!", face, expr.scope); } } } @@ -602,14 +634,16 @@ pub fn pull_data(tables_ref: &RwLock, face: &Arc, expr: WireE } None => { log::error!( - "Pull data for unknown subscription {} (no info)!", + "{} Pull data for unknown subscriber {} (no info)!", + face, prefix.expr() + expr.suffix.as_ref() ); } }, None => { log::error!( - "Pull data for unknown subscription {} (no context)!", + "{} Pull data for unknown subscriber {} (no context)!", + face, prefix.expr() + expr.suffix.as_ref() ); } @@ -617,13 +651,14 @@ pub fn pull_data(tables_ref: &RwLock, face: &Arc, expr: WireE } None => { log::error!( - "Pull data for unknown subscription {} (no resource)!", + "{} Pull data for unknown subscriber {} (no resource)!", + face, prefix.expr() + expr.suffix.as_ref() ); } }, None => { - log::error!("Pull data with unknown scope {}!", expr.scope); + log::error!("{} Pull data with unknown scope {}!", face, expr.scope); } }; } diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index b0f7f7f7ef..287621151a 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -21,16 +21,14 @@ use async_trait::async_trait; use std::collections::HashMap; use std::sync::{Arc, Weak}; use zenoh_config::WhatAmI; -use zenoh_protocol::zenoh::reply::ReplyBody; -use zenoh_protocol::zenoh::Put; use zenoh_protocol::{ core::{key_expr::keyexpr, Encoding, WireExpr}, network::{ - declare::{ext, queryable::ext::QueryableInfo}, + declare::{ext, queryable::ext::QueryableInfo, QueryableId}, request::{ext::TargetType, Request, RequestId}, response::{self, ext::ResponderIdType, Response, ResponseFinal}, }, - zenoh::{query::Consolidation, Reply, RequestBody, ResponseBody}, + zenoh::{query::Consolidation, reply::ReplyBody, Put, Reply, RequestBody, ResponseBody}, }; use zenoh_sync::get_mut_unchecked; use zenoh_util::Timed; @@ -44,17 +42,24 @@ pub(crate) fn declare_queryable( hat_code: &(dyn HatTrait + Send + Sync), tables: &TablesLock, face: &mut Arc, + id: QueryableId, expr: &WireExpr, qabl_info: &QueryableInfo, node_id: NodeId, ) { - log::debug!("Register queryable {}", face); let rtables = zread!(tables.tables); match rtables .get_mapping(face, &expr.scope, expr.mapping) .cloned() { Some(mut prefix) => { + log::debug!( + "{} Declare queryable {} 
({}{})", + face, + id, + prefix.expr(), + expr.suffix + ); let res = Resource::get_resource(&prefix, &expr.suffix); let (mut res, mut wtables) = if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { @@ -76,7 +81,7 @@ pub(crate) fn declare_queryable( (res, wtables) }; - hat_code.declare_queryable(&mut wtables, face, &mut res, qabl_info, node_id); + hat_code.declare_queryable(&mut wtables, face, id, &mut res, qabl_info, node_id); disable_matches_query_routes(&mut wtables, &mut res); drop(wtables); @@ -93,7 +98,12 @@ pub(crate) fn declare_queryable( } drop(wtables); } - None => log::error!("Declare queryable for unknown scope {}!", expr.scope), + None => log::error!( + "{} Declare queryable {} for unknown scope {}!", + face, + id, + expr.scope + ), } } @@ -101,37 +111,57 @@ pub(crate) fn undeclare_queryable( hat_code: &(dyn HatTrait + Send + Sync), tables: &TablesLock, face: &mut Arc, + id: QueryableId, expr: &WireExpr, node_id: NodeId, ) { - let rtables = zread!(tables.tables); - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - - hat_code.undeclare_queryable(&mut wtables, face, &mut res, node_id); - - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); + let res = if expr.is_empty() { + None + } else { + let rtables = zread!(tables.tables); + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(res) => Some(res), + None => { + log::error!( + "{} Undeclare unknown queryable {}{}!", + face, + prefix.expr(), + expr.suffix + ); + return; } - Resource::clean(&mut res); - drop(wtables); + }, + None => { + log::error!( + "{} Undeclare queryable with unknown scope {}", + face, + expr.scope + ); + return; } - None => log::error!("Undeclare unknown queryable!"), - }, - None => log::error!("Undeclare queryable with unknown scope!"), + } + }; + let mut wtables = zwrite!(tables.tables); + if let Some(mut res) = hat_code.undeclare_queryable(&mut wtables, face, id, res, node_id) { + log::debug!("{} Undeclare queryable {} ({})", face, id, res.expr()); + disable_matches_query_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_query_routes = compute_matches_query_routes(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + Resource::clean(&mut res); + drop(wtables); + } else { + log::error!("{} Undeclare unknown queryable {}", face, id); } } @@ -586,7 +616,7 @@ pub fn route_query( ext_tstamp: None, ext_respid: Some(response::ext::ResponderIdType { zid, - eid: 0, // @TODO use proper ResponderId (#703) + eid: 0, // 0 is reserved for routing core }), }, expr.full_expr().to_string(), @@ -701,8 +731,9 @@ pub fn route_query( } None => { log::error!( - "Route query with unknown scope {}! Send final reply.", - expr.scope + "{} Route query with unknown scope {}! 
Send final reply.", + face, + expr.scope, ); drop(rtables); face.primitives diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 813d72a661..9f43841025 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -667,7 +667,11 @@ pub fn register_expr( let mut fullexpr = prefix.expr(); fullexpr.push_str(expr.suffix.as_ref()); if res.expr() != fullexpr { - log::error!("Resource {} remapped. Remapping unsupported!", expr_id); + log::error!( + "{} Resource {} remapped. Remapping unsupported!", + face, + expr_id + ); } } None => { @@ -718,7 +722,11 @@ pub fn register_expr( drop(wtables); } }, - None => log::error!("Declare resource with unknown scope {}!", expr.scope), + None => log::error!( + "{} Declare resource with unknown scope {}!", + face, + expr.scope + ), } } @@ -726,7 +734,7 @@ pub fn unregister_expr(tables: &TablesLock, face: &mut Arc, expr_id: let wtables = zwrite!(tables.tables); match get_mut_unchecked(face).remote_mappings.remove(&expr_id) { Some(mut res) => Resource::clean(&mut res), - None => log::error!("Undeclare unknown resource!"), + None => log::error!("{} Undeclare unknown resource!", face), } drop(wtables); } diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index aa83c34f5d..05210bcaee 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -40,11 +40,11 @@ use super::{ }; use std::{ any::Any, - collections::{HashMap, HashSet}, - sync::Arc, + collections::HashMap, + sync::{atomic::AtomicU32, Arc}, }; use zenoh_config::WhatAmI; -use zenoh_protocol::network::declare::queryable::ext::QueryableInfo; +use zenoh_protocol::network::declare::{queryable::ext::QueryableInfo, QueryableId, SubscriberId}; use zenoh_protocol::network::Oam; use zenoh_result::ZResult; use zenoh_sync::get_mut_unchecked; @@ -131,7 +131,7 @@ impl HatBaseTrait for HatCode { face.local_mappings.clear(); let mut subs_matches = vec![]; - for mut res in face + for (_id, mut res) in face .hat .downcast_mut::() .unwrap() @@ -159,7 +159,7 @@ impl HatBaseTrait for HatCode { } let mut qabls_matches = vec![]; - for mut res in face + for (_id, mut res) in face .hat .downcast_mut::() .unwrap() @@ -290,19 +290,21 @@ impl HatContext { } struct HatFace { - local_subs: HashSet>, - remote_subs: HashSet>, - local_qabls: HashMap, QueryableInfo>, - remote_qabls: HashSet>, + next_id: AtomicU32, // @TODO: manage rollover and uniqueness + local_subs: HashMap, SubscriberId>, + remote_subs: HashMap>, + local_qabls: HashMap, (QueryableId, QueryableInfo)>, + remote_qabls: HashMap>, } impl HatFace { fn new() -> Self { Self { - local_subs: HashSet::new(), - remote_subs: HashSet::new(), + next_id: AtomicU32::new(0), + local_subs: HashMap::new(), + remote_subs: HashMap::new(), local_qabls: HashMap::new(), - remote_qabls: HashSet::new(), + remote_qabls: HashMap::new(), } } } diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 828915018d..f9f827ecc5 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -22,8 +22,10 @@ use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; +use std::sync::atomic::Ordering; use std::sync::Arc; use zenoh_protocol::core::key_expr::OwnedKeyExpr; +use 
zenoh_protocol::network::declare::SubscriberId; use zenoh_protocol::{ core::{Reliability, WhatAmI}, network::declare::{ @@ -43,10 +45,11 @@ fn propagate_simple_subscription_to( ) { if (src_face.id != dst_face.id || (dst_face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS))) - && !face_hat!(dst_face).local_subs.contains(res) + && !face_hat!(dst_face).local_subs.contains_key(res) && (src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client) { - face_hat_mut!(dst_face).local_subs.insert(res.clone()); + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -54,7 +57,7 @@ fn propagate_simple_subscription_to( ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id, wire_expr: key_expr, ext_info: *sub_info, }), @@ -83,13 +86,13 @@ fn propagate_simple_subscription( fn register_client_subscription( _tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { // Register subscription { let res = get_mut_unchecked(res); - log::debug!("Register subscription {} for {}", res.expr(), face); match res.session_ctxs.get_mut(&face.id) { Some(ctx) => match &ctx.subs { Some(info) => { @@ -118,16 +121,17 @@ fn register_client_subscription( } } } - face_hat_mut!(face).remote_subs.insert(res.clone()); + face_hat_mut!(face).remote_subs.insert(id, res.clone()); } fn declare_client_subscription( tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { - register_client_subscription(tables, face, res, sub_info); + register_client_subscription(tables, face, id, res, sub_info); let mut propa_sub_info = *sub_info; propa_sub_info.mode = Mode::Push; @@ -144,7 +148,7 @@ fn declare_client_subscription( ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id: 0, // @TODO use proper SubscriberId wire_expr: res.expr().into(), ext_info: *sub_info, }), @@ -170,21 +174,19 @@ fn client_subs(res: &Arc) -> Vec> { fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { for face in tables.faces.values_mut() { - if face_hat!(face).local_subs.contains(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); + if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }), }, res.expr(), )); - face_hat_mut!(face).local_subs.remove(res); } } } @@ -194,45 +196,48 @@ pub(super) fn undeclare_client_subscription( face: &mut Arc, res: &mut Arc, ) { - log::debug!("Unregister client subscription {} for {}", res.expr(), face); - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).subs = None; - } - face_hat_mut!(face).remote_subs.remove(res); - - let mut client_subs = client_subs(res); - if client_subs.is_empty() { - propagate_forget_simple_subscription(tables, res); - } - if 
client_subs.len() == 1 { - let face = &mut client_subs[0]; - if face_hat!(face).local_subs.contains(res) - && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) - { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + if !face_hat_mut!(face).remote_subs.values().any(|s| *s == *res) { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).subs = None; + } - face_hat_mut!(face).local_subs.remove(res); + let mut client_subs = client_subs(res); + if client_subs.is_empty() { + propagate_forget_simple_subscription(tables, res); + } + if client_subs.len() == 1 { + let face = &mut client_subs[0]; + if !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { + if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } } } } + fn forget_client_subscription( tables: &mut Tables, face: &mut Arc, - res: &mut Arc, -) { - undeclare_client_subscription(tables, face, res); + id: SubscriberId, +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_subs.remove(&id) { + undeclare_client_subscription(tables, face, &mut res); + Some(res) + } else { + None + } } pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { @@ -246,7 +251,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { .cloned() .collect::>>() { - for sub in &face_hat!(src_face).remote_subs { + for sub in face_hat!(src_face).remote_subs.values() { propagate_simple_subscription_to(tables, face, sub, &sub_info, &mut src_face.clone()); } } @@ -257,27 +262,29 @@ impl HatPubSubTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, _node_id: NodeId, ) { - declare_client_subscription(tables, face, res, sub_info); + declare_client_subscription(tables, face, id, res, sub_info); } fn undeclare_subscription( &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: SubscriberId, + _res: Option>, _node_id: NodeId, - ) { - forget_client_subscription(tables, face, res); + ) -> Option> { + forget_client_subscription(tables, face, id) } fn get_subscriptions(&self, tables: &Tables) -> Vec> { let mut subs = HashSet::new(); for src_face in tables.faces.values() { - for sub in &face_hat!(src_face).remote_subs { + for sub in face_hat!(src_face).remote_subs.values() { subs.insert(sub.clone()); } } diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index c6dfc34eac..4964a8880a 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -23,10 +23,12 @@ use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; +use std::sync::atomic::Ordering; use std::sync::Arc; use 
zenoh_buffers::ZBuf; use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; use zenoh_protocol::core::key_expr::OwnedKeyExpr; +use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ core::{WhatAmI, WireExpr}, network::declare::{ @@ -83,16 +85,19 @@ fn propagate_simple_queryable( let faces = tables.faces.values().cloned(); for mut dst_face in faces { let info = local_qabl_info(tables, res, &dst_face); - let current_info = face_hat!(dst_face).local_qabls.get(res); + let current = face_hat!(dst_face).local_qabls.get(res); if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) - && (current_info.is_none() || *current_info.unwrap() != info) + && (current.is_none() || current.unwrap().1 != info) && (src_face.is_none() || src_face.as_ref().unwrap().whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client) { + let id = current + .map(|c| c.0) + .unwrap_or(face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst)); face_hat_mut!(&mut dst_face) .local_qabls - .insert(res.clone(), info); + .insert(res.clone(), (id, info)); let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -100,7 +105,7 @@ fn propagate_simple_queryable( ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id, wire_expr: key_expr, ext_info: info, }), @@ -114,13 +119,13 @@ fn propagate_simple_queryable( fn register_client_queryable( _tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfo, ) { // Register queryable { let res = get_mut_unchecked(res); - log::debug!("Register queryable {} (face: {})", res.expr(), face,); get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { Arc::new(SessionContext { face: face.clone(), @@ -135,16 +140,17 @@ fn register_client_queryable( })) .qabl = Some(*qabl_info); } - face_hat_mut!(face).remote_qabls.insert(res.clone()); + face_hat_mut!(face).remote_qabls.insert(id, res.clone()); } fn declare_client_queryable( tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfo, ) { - register_client_queryable(tables, face, res, qabl_info); + register_client_queryable(tables, face, id, res, qabl_info); propagate_simple_queryable(tables, res, Some(face)); } @@ -164,22 +170,19 @@ fn client_qabls(res: &Arc) -> Vec> { fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { for face in tables.faces.values_mut() { - if face_hat!(face).local_qabls.contains_key(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); + if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }), }, res.expr(), )); - - face_hat_mut!(face).local_qabls.remove(res); } } } @@ -189,38 +192,37 @@ pub(super) fn undeclare_client_queryable( face: &mut Arc, res: &mut Arc, ) { - log::debug!("Unregister client queryable {} for {}", res.expr(), face); - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).qabl = None; - if ctx.qabl.is_none() { - 
face_hat_mut!(face).remote_qabls.remove(res); + if !face_hat_mut!(face) + .remote_qabls + .values() + .any(|s| *s == *res) + { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).qabl = None; } - } - let mut client_qabls = client_qabls(res); - if client_qabls.is_empty() { - propagate_forget_simple_queryable(tables, res); - } else { - propagate_simple_queryable(tables, res, None); - } - if client_qabls.len() == 1 { - let face = &mut client_qabls[0]; - if face_hat!(face).local_qabls.contains_key(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); - - face_hat_mut!(face).local_qabls.remove(res); + let mut client_qabls = client_qabls(res); + if client_qabls.is_empty() { + propagate_forget_simple_queryable(tables, res); + } else { + propagate_simple_queryable(tables, res, None); + } + if client_qabls.len() == 1 { + let face = &mut client_qabls[0]; + if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } } } } @@ -228,9 +230,14 @@ pub(super) fn undeclare_client_queryable( fn forget_client_queryable( tables: &mut Tables, face: &mut Arc, - res: &mut Arc, -) { - undeclare_client_queryable(tables, face, res); + id: QueryableId, +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_qabls.remove(&id) { + undeclare_client_queryable(tables, face, &mut res); + Some(res) + } else { + None + } } pub(super) fn queries_new_face(tables: &mut Tables, _face: &mut Arc) { @@ -240,7 +247,7 @@ pub(super) fn queries_new_face(tables: &mut Tables, _face: &mut Arc) .cloned() .collect::>>() { - for qabl in face_hat!(face).remote_qabls.iter() { + for qabl in face_hat!(face).remote_qabls.values() { propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); } } @@ -255,27 +262,29 @@ impl HatQueriesTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfo, _node_id: NodeId, ) { - declare_client_queryable(tables, face, res, qabl_info); + declare_client_queryable(tables, face, id, res, qabl_info); } fn undeclare_queryable( &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: QueryableId, + _res: Option>, _node_id: NodeId, - ) { - forget_client_queryable(tables, face, res); + ) -> Option> { + forget_client_queryable(tables, face, id) } fn get_queryables(&self, tables: &Tables) -> Vec> { let mut qabls = HashSet::new(); for src_face in tables.faces.values() { - for qabl in &face_hat!(src_face).remote_qabls { + for qabl in face_hat!(src_face).remote_qabls.values() { qabls.insert(qabl.clone()); } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index a655d2f0a3..5591ea3b3e 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -47,12 +47,16 @@ use async_std::task::JoinHandle; use std::{ 
any::Any, collections::{HashMap, HashSet}, - sync::Arc, + sync::{atomic::AtomicU32, Arc}, }; use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, ZenohId}; use zenoh_protocol::{ common::ZExtBody, - network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE, Oam}, + network::{ + declare::{queryable::ext::QueryableInfo, QueryableId, SubscriberId}, + oam::id::OAM_LINKSTATE, + Oam, + }, }; use zenoh_result::ZResult; use zenoh_sync::get_mut_unchecked; @@ -126,7 +130,6 @@ impl HatTables { } fn schedule_compute_trees(&mut self, tables_ref: Arc) { - log::trace!("Schedule computations"); if self.peers_trees_task.is_none() { let task = Some(async_std::task::spawn(async move { async_std::task::sleep(std::time::Duration::from_millis( @@ -142,7 +145,6 @@ impl HatTables { pubsub::pubsub_tree_change(&mut tables, &new_childs); queries::queries_tree_change(&mut tables, &new_childs); - log::trace!("Computations completed"); hat_mut!(tables).peers_trees_task = None; })); self.peers_trees_task = task; @@ -248,7 +250,7 @@ impl HatBaseTrait for HatCode { face.local_mappings.clear(); let mut subs_matches = vec![]; - for mut res in face + for (_id, mut res) in face .hat .downcast_mut::() .unwrap() @@ -276,7 +278,7 @@ impl HatBaseTrait for HatCode { } let mut qabls_matches = vec![]; - for mut res in face + for (_, mut res) in face .hat .downcast_mut::() .unwrap() @@ -471,20 +473,22 @@ impl HatContext { struct HatFace { link_id: usize, - local_subs: HashSet>, - remote_subs: HashSet>, - local_qabls: HashMap, QueryableInfo>, - remote_qabls: HashSet>, + next_id: AtomicU32, // @TODO: manage rollover and uniqueness + local_subs: HashMap, SubscriberId>, + remote_subs: HashMap>, + local_qabls: HashMap, (QueryableId, QueryableInfo)>, + remote_qabls: HashMap>, } impl HatFace { fn new() -> Self { Self { link_id: 0, - local_subs: HashSet::new(), - remote_subs: HashSet::new(), + next_id: AtomicU32::new(0), + local_subs: HashMap::new(), + remote_subs: HashMap::new(), local_qabls: HashMap::new(), - remote_qabls: HashSet::new(), + remote_qabls: HashMap::new(), } } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index c364f7359f..9a41915333 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -25,8 +25,10 @@ use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use petgraph::graph::NodeIndex; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; +use std::sync::atomic::Ordering; use std::sync::Arc; use zenoh_protocol::core::key_expr::OwnedKeyExpr; +use zenoh_protocol::network::declare::SubscriberId; use zenoh_protocol::{ core::{Reliability, WhatAmI, ZenohId}, network::declare::{ @@ -53,8 +55,6 @@ fn send_sourced_subscription_to_net_childs( if src_face.is_none() || someface.id != src_face.unwrap().id { let key_expr = Resource::decl_key(res, &mut someface); - log::debug!("Send subscription {} on {}", res.expr(), someface); - someface.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, @@ -63,7 +63,7 @@ fn send_sourced_subscription_to_net_childs( node_id: routing_context, }, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO + id: 0, // Sourced subscriptions do not use ids wire_expr: key_expr, ext_info: *sub_info, }), @@ -87,10 +87,11 @@ fn propagate_simple_subscription_to( src_face: &mut Arc, ) { if (src_face.id != dst_face.id || res.expr().starts_with(PREFIX_LIVELINESS)) - 
&& !face_hat!(dst_face).local_subs.contains(res) + && !face_hat!(dst_face).local_subs.contains_key(res) && dst_face.whatami == WhatAmI::Client { - face_hat_mut!(dst_face).local_subs.insert(res.clone()); + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -98,7 +99,7 @@ fn propagate_simple_subscription_to( ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO + id, wire_expr: key_expr, ext_info: *sub_info, }), @@ -171,7 +172,6 @@ fn register_peer_subscription( if !res_hat!(res).peer_subs.contains(&peer) { // Register peer subscription { - log::debug!("Register peer subscription {} (peer: {})", res.expr(), peer); res_hat_mut!(res).peer_subs.insert(peer); hat_mut!(tables).peer_subs.insert(res.clone()); } @@ -199,13 +199,13 @@ fn declare_peer_subscription( fn register_client_subscription( _tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { // Register subscription { let res = get_mut_unchecked(res); - log::debug!("Register subscription {} for {}", res.expr(), face); match res.session_ctxs.get_mut(&face.id) { Some(ctx) => match &ctx.subs { Some(info) => { @@ -234,16 +234,17 @@ fn register_client_subscription( } } } - face_hat_mut!(face).remote_subs.insert(res.clone()); + face_hat_mut!(face).remote_subs.insert(id, res.clone()); } fn declare_client_subscription( tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { - register_client_subscription(tables, face, res, sub_info); + register_client_subscription(tables, face, id, res, sub_info); let mut propa_sub_info = *sub_info; propa_sub_info.mode = Mode::Push; let zid = tables.zid; @@ -289,8 +290,6 @@ fn send_forget_sourced_subscription_to_net_childs( if src_face.is_none() || someface.id != src_face.unwrap().id { let wire_expr = Resource::decl_key(res, &mut someface); - log::debug!("Send forget subscription {} on {}", res.expr(), someface); - someface.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, @@ -299,7 +298,7 @@ fn send_forget_sourced_subscription_to_net_childs( node_id: routing_context.unwrap_or(0), }, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO + id: 0, // Sourced subscriptions do not use ids ext_wire_expr: WireExprType { wire_expr }, }), }, @@ -315,21 +314,19 @@ fn send_forget_sourced_subscription_to_net_childs( fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { for face in tables.faces.values_mut() { - if face_hat!(face).local_subs.contains(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); + if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }), }, res.expr(), )); - face_hat_mut!(face).local_subs.remove(res); } } } @@ -370,11 +367,6 @@ fn propagate_forget_sourced_subscription( } fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { - log::debug!( - "Unregister peer subscription {} (peer: 
{})", - res.expr(), - peer - ); res_hat_mut!(res).peer_subs.retain(|sub| sub != peer); if res_hat!(res).peer_subs.is_empty() { @@ -414,37 +406,34 @@ pub(super) fn undeclare_client_subscription( face: &mut Arc, res: &mut Arc, ) { - log::debug!("Unregister client subscription {} for {}", res.expr(), face); - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).subs = None; - } - face_hat_mut!(face).remote_subs.remove(res); - - let mut client_subs = client_subs(res); - let peer_subs = remote_peer_subs(tables, res); - if client_subs.is_empty() { - undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); - } - if client_subs.len() == 1 && !peer_subs { - let face = &mut client_subs[0]; - if face_hat!(face).local_subs.contains(res) - && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) - { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + if !face_hat_mut!(face).remote_subs.values().any(|s| *s == *res) { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).subs = None; + } - face_hat_mut!(face).local_subs.remove(res); + let mut client_subs = client_subs(res); + let peer_subs = remote_peer_subs(tables, res); + if client_subs.is_empty() { + undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); + } + if client_subs.len() == 1 && !peer_subs { + let face = &mut client_subs[0]; + if !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { + if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } } } } @@ -452,20 +441,26 @@ pub(super) fn undeclare_client_subscription( fn forget_client_subscription( tables: &mut Tables, face: &mut Arc, - res: &mut Arc, -) { - undeclare_client_subscription(tables, face, res); + id: SubscriberId, +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_subs.remove(&id) { + undeclare_client_subscription(tables, face, &mut res); + Some(res) + } else { + None + } } pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, // @TODO + reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers mode: Mode::Push, }; if face.whatami == WhatAmI::Client { for sub in &hat!(tables).peer_subs { - face_hat_mut!(face).local_subs.insert(sub.clone()); + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -473,7 +468,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO + id, wire_expr: key_expr, 
ext_info: sub_info, }), @@ -515,7 +510,7 @@ pub(super) fn pubsub_tree_change(tables: &mut Tables, new_childs: &[Vec, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, node_id: NodeId, @@ -585,7 +581,7 @@ impl HatPubSubTrait for HatCode { declare_peer_subscription(tables, face, res, sub_info, peer) } } else { - declare_client_subscription(tables, face, res, sub_info) + declare_client_subscription(tables, face, id, res, sub_info) } } @@ -593,15 +589,23 @@ impl HatPubSubTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: SubscriberId, + res: Option>, node_id: NodeId, - ) { + ) -> Option> { if face.whatami != WhatAmI::Client { - if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_subscription(tables, face, res, &peer); + if let Some(mut res) = res { + if let Some(peer) = get_peer(tables, face, node_id) { + forget_peer_subscription(tables, face, &mut res, &peer); + Some(res) + } else { + None + } + } else { + None } } else { - forget_client_subscription(tables, face, res); + forget_client_subscription(tables, face, id) } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index 4192f87e55..51aac2175a 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -26,10 +26,12 @@ use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; use std::borrow::Cow; use std::collections::HashMap; +use std::sync::atomic::Ordering; use std::sync::Arc; use zenoh_buffers::ZBuf; use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; use zenoh_protocol::core::key_expr::OwnedKeyExpr; +use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ core::{WhatAmI, WireExpr, ZenohId}, network::declare::{ @@ -133,8 +135,6 @@ fn send_sourced_queryable_to_net_childs( if src_face.is_none() || someface.id != src_face.as_ref().unwrap().id { let key_expr = Resource::decl_key(res, &mut someface); - log::debug!("Send queryable {} on {}", res.expr(), someface); - someface.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, @@ -143,7 +143,7 @@ fn send_sourced_queryable_to_net_childs( node_id: routing_context, }, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id: 0, // Sourced queryables do not use ids wire_expr: key_expr, ext_info: *qabl_info, }), @@ -166,14 +166,17 @@ fn propagate_simple_queryable( let faces = tables.faces.values().cloned(); for mut dst_face in faces { let info = local_qabl_info(tables, res, &dst_face); - let current_info = face_hat!(dst_face).local_qabls.get(res); + let current = face_hat!(dst_face).local_qabls.get(res); if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) - && (current_info.is_none() || *current_info.unwrap() != info) + && (current.is_none() || current.unwrap().1 != info) && dst_face.whatami == WhatAmI::Client { + let id = current + .map(|c| c.0) + .unwrap_or(face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst)); face_hat_mut!(&mut dst_face) .local_qabls - .insert(res.clone(), info); + .insert(res.clone(), (id, info)); let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -181,7 +184,7 @@ fn propagate_simple_queryable( ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper 
QueryableId (#703) + id, wire_expr: key_expr, ext_info: info, }), @@ -240,7 +243,6 @@ fn register_peer_queryable( if current_info.is_none() || current_info.unwrap() != qabl_info { // Register peer queryable { - log::debug!("Register peer queryable {} (peer: {})", res.expr(), peer,); res_hat_mut!(res).peer_qabls.insert(peer, *qabl_info); hat_mut!(tables).peer_qabls.insert(res.clone()); } @@ -269,13 +271,13 @@ fn declare_peer_queryable( fn register_client_queryable( _tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfo, ) { // Register queryable { let res = get_mut_unchecked(res); - log::debug!("Register queryable {} (face: {})", res.expr(), face,); get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { Arc::new(SessionContext { face: face.clone(), @@ -290,17 +292,17 @@ fn register_client_queryable( })) .qabl = Some(*qabl_info); } - face_hat_mut!(face).remote_qabls.insert(res.clone()); + face_hat_mut!(face).remote_qabls.insert(id, res.clone()); } fn declare_client_queryable( tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfo, ) { - register_client_queryable(tables, face, res, qabl_info); - + register_client_queryable(tables, face, id, res, qabl_info); let local_details = local_peer_qabl_info(tables, res); let zid = tables.zid; register_peer_queryable(tables, Some(face), res, &local_details, zid); @@ -345,8 +347,6 @@ fn send_forget_sourced_queryable_to_net_childs( if src_face.is_none() || someface.id != src_face.unwrap().id { let wire_expr = Resource::decl_key(res, &mut someface); - log::debug!("Send forget queryable {} on {}", res.expr(), someface); - someface.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, @@ -355,7 +355,7 @@ fn send_forget_sourced_queryable_to_net_childs( node_id: routing_context, }, body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id: 0, ext_wire_expr: WireExprType { wire_expr }, }), }, @@ -371,22 +371,19 @@ fn send_forget_sourced_queryable_to_net_childs( fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { for face in tables.faces.values_mut() { - if face_hat!(face).local_qabls.contains_key(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); + if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }), }, res.expr(), )); - - face_hat_mut!(face).local_qabls.remove(res); } } } @@ -427,7 +424,6 @@ fn propagate_forget_sourced_queryable( } fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { - log::debug!("Unregister peer queryable {} (peer: {})", res.expr(), peer,); res_hat_mut!(res).peer_qabls.remove(peer); if res_hat!(res).peer_qabls.is_empty() { @@ -467,42 +463,41 @@ pub(super) fn undeclare_client_queryable( face: &mut Arc, res: &mut Arc, ) { - log::debug!("Unregister client queryable {} for {}", res.expr(), face); - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).qabl = None; - if ctx.qabl.is_none() { - face_hat_mut!(face).remote_qabls.remove(res); + if !face_hat_mut!(face) + .remote_qabls + 
.values() + .any(|s| *s == *res) + { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).qabl = None; } - } - let mut client_qabls = client_qabls(res); - let peer_qabls = remote_peer_qabls(tables, res); + let mut client_qabls = client_qabls(res); + let peer_qabls = remote_peer_qabls(tables, res); - if client_qabls.is_empty() { - undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); - } else { - let local_info = local_peer_qabl_info(tables, res); - register_peer_queryable(tables, None, res, &local_info, tables.zid); - } - - if client_qabls.len() == 1 && !peer_qabls { - let face = &mut client_qabls[0]; - if face_hat!(face).local_qabls.contains_key(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + if client_qabls.is_empty() { + undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); + } else { + let local_info = local_peer_qabl_info(tables, res); + register_peer_queryable(tables, None, res, &local_info, tables.zid); + } - face_hat_mut!(face).local_qabls.remove(res); + if client_qabls.len() == 1 && !peer_qabls { + let face = &mut client_qabls[0]; + if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } } } } @@ -510,9 +505,14 @@ pub(super) fn undeclare_client_queryable( fn forget_client_queryable( tables: &mut Tables, face: &mut Arc, - res: &mut Arc, -) { - undeclare_client_queryable(tables, face, res); + id: QueryableId, +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_qabls.remove(&id) { + undeclare_client_queryable(tables, face, &mut res); + Some(res) + } else { + None + } } pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { @@ -520,7 +520,10 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { for qabl in &hat!(tables).peer_qabls { if qabl.context.is_some() { let info = local_qabl_info(tables, qabl, face); - face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -528,7 +531,7 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id, wire_expr: key_expr, ext_info: info, }), @@ -641,6 +644,7 @@ impl HatQueriesTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfo, node_id: NodeId, @@ -650,7 +654,7 @@ impl HatQueriesTrait for HatCode { declare_peer_queryable(tables, face, res, qabl_info, peer); } } else { - declare_client_queryable(tables, face, res, qabl_info); + 
declare_client_queryable(tables, face, id, res, qabl_info); } } @@ -658,15 +662,23 @@ impl HatQueriesTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: QueryableId, + res: Option>, node_id: NodeId, - ) { + ) -> Option> { if face.whatami != WhatAmI::Client { - if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_queryable(tables, face, res, &peer); + if let Some(mut res) = res { + if let Some(peer) = get_peer(tables, face, node_id) { + forget_peer_queryable(tables, face, &mut res, &peer); + Some(res) + } else { + None + } + } else { + None } } else { - forget_client_queryable(tables, face, res); + forget_client_queryable(tables, face, id) } } diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index 4fbf9c9e5d..d9feb687f2 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -31,7 +31,10 @@ use zenoh_config::{unwrap_or_default, Config, WhatAmI}; use zenoh_protocol::{ core::WireExpr, network::{ - declare::{queryable::ext::QueryableInfo, subscriber::ext::SubscriberInfo}, + declare::{ + queryable::ext::QueryableInfo, subscriber::ext::SubscriberInfo, QueryableId, + SubscriberId, + }, Oam, }, }; @@ -117,6 +120,7 @@ pub(crate) trait HatPubSubTrait { &self, tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, node_id: NodeId, @@ -125,9 +129,10 @@ pub(crate) trait HatPubSubTrait { &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: SubscriberId, + res: Option>, node_id: NodeId, - ); + ) -> Option>; fn get_subscriptions(&self, tables: &Tables) -> Vec>; @@ -147,6 +152,7 @@ pub(crate) trait HatQueriesTrait { &self, tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfo, node_id: NodeId, @@ -155,9 +161,10 @@ pub(crate) trait HatQueriesTrait { &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: QueryableId, + res: Option>, node_id: NodeId, - ); + ) -> Option>; fn get_queryables(&self, tables: &Tables) -> Vec>; diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index 8dc4f15ada..1a6c1ba407 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -45,11 +45,14 @@ use super::{ }; use std::{ any::Any, - collections::{HashMap, HashSet}, - sync::Arc, + collections::HashMap, + sync::{atomic::AtomicU32, Arc}, }; use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher}; -use zenoh_protocol::network::Oam; +use zenoh_protocol::network::{ + declare::{QueryableId, SubscriberId}, + Oam, +}; use zenoh_protocol::{ common::ZExtBody, network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE}, @@ -177,7 +180,7 @@ impl HatBaseTrait for HatCode { face.local_mappings.clear(); let mut subs_matches = vec![]; - for mut res in face + for (_id, mut res) in face .hat .downcast_mut::() .unwrap() @@ -205,7 +208,7 @@ impl HatBaseTrait for HatCode { } let mut qabls_matches = vec![]; - for mut res in face + for (_id, mut res) in face .hat .downcast_mut::() .unwrap() @@ -363,19 +366,21 @@ impl HatContext { } struct HatFace { - local_subs: HashSet>, - remote_subs: HashSet>, - local_qabls: HashMap, QueryableInfo>, - remote_qabls: HashSet>, + next_id: AtomicU32, // @TODO: manage rollover and uniqueness + local_subs: HashMap, SubscriberId>, + remote_subs: HashMap>, + local_qabls: HashMap, (QueryableId, QueryableInfo)>, + remote_qabls: HashMap>, } impl HatFace { fn new() -> Self { 
Self { - local_subs: HashSet::new(), - remote_subs: HashSet::new(), + next_id: AtomicU32::new(0), + local_subs: HashMap::new(), + remote_subs: HashMap::new(), local_qabls: HashMap::new(), - remote_qabls: HashSet::new(), + remote_qabls: HashMap::new(), } } } diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index a7d58ce1a5..4f6ce5aeca 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -22,8 +22,10 @@ use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; +use std::sync::atomic::Ordering; use std::sync::Arc; use zenoh_protocol::core::key_expr::OwnedKeyExpr; +use zenoh_protocol::network::declare::SubscriberId; use zenoh_protocol::{ core::{Reliability, WhatAmI}, network::declare::{ @@ -43,10 +45,11 @@ fn propagate_simple_subscription_to( ) { if (src_face.id != dst_face.id || (dst_face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS))) - && !face_hat!(dst_face).local_subs.contains(res) + && !face_hat!(dst_face).local_subs.contains_key(res) && (src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client) { - face_hat_mut!(dst_face).local_subs.insert(res.clone()); + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -54,7 +57,7 @@ fn propagate_simple_subscription_to( ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id, wire_expr: key_expr, ext_info: *sub_info, }), @@ -83,13 +86,13 @@ fn propagate_simple_subscription( fn register_client_subscription( _tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { // Register subscription { let res = get_mut_unchecked(res); - log::debug!("Register subscription {} for {}", res.expr(), face); match res.session_ctxs.get_mut(&face.id) { Some(ctx) => match &ctx.subs { Some(info) => { @@ -118,16 +121,17 @@ fn register_client_subscription( } } } - face_hat_mut!(face).remote_subs.insert(res.clone()); + face_hat_mut!(face).remote_subs.insert(id, res.clone()); } fn declare_client_subscription( tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { - register_client_subscription(tables, face, res, sub_info); + register_client_subscription(tables, face, id, res, sub_info); let mut propa_sub_info = *sub_info; propa_sub_info.mode = Mode::Push; @@ -144,7 +148,7 @@ fn declare_client_subscription( ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id: 0, // @TODO use proper SubscriberId wire_expr: res.expr().into(), ext_info: *sub_info, }), @@ -170,21 +174,19 @@ fn client_subs(res: &Arc) -> Vec> { fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { for face in tables.faces.values_mut() { - if face_hat!(face).local_subs.contains(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); + if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, ext_tstamp: 
None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }), }, res.expr(), )); - face_hat_mut!(face).local_subs.remove(res); } } } @@ -194,36 +196,33 @@ pub(super) fn undeclare_client_subscription( face: &mut Arc, res: &mut Arc, ) { - log::debug!("Unregister client subscription {} for {}", res.expr(), face); - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).subs = None; - } - face_hat_mut!(face).remote_subs.remove(res); - - let mut client_subs = client_subs(res); - if client_subs.is_empty() { - propagate_forget_simple_subscription(tables, res); - } - if client_subs.len() == 1 { - let face = &mut client_subs[0]; - if face_hat!(face).local_subs.contains(res) - && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) - { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + if !face_hat_mut!(face).remote_subs.values().any(|s| *s == *res) { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).subs = None; + } - face_hat_mut!(face).local_subs.remove(res); + let mut client_subs = client_subs(res); + if client_subs.is_empty() { + propagate_forget_simple_subscription(tables, res); + } + if client_subs.len() == 1 { + let face = &mut client_subs[0]; + if !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { + if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } } } } @@ -231,9 +230,14 @@ pub(super) fn undeclare_client_subscription( fn forget_client_subscription( tables: &mut Tables, face: &mut Arc, - res: &mut Arc, -) { - undeclare_client_subscription(tables, face, res); + id: SubscriberId, +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_subs.remove(&id) { + undeclare_client_subscription(tables, face, &mut res); + Some(res) + } else { + None + } } pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { @@ -247,7 +251,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { .cloned() .collect::>>() { - for sub in &face_hat!(src_face).remote_subs { + for sub in face_hat!(src_face).remote_subs.values() { propagate_simple_subscription_to(tables, face, sub, &sub_info, &mut src_face.clone()); } } @@ -258,27 +262,29 @@ impl HatPubSubTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, _node_id: NodeId, ) { - declare_client_subscription(tables, face, res, sub_info); + declare_client_subscription(tables, face, id, res, sub_info); } fn undeclare_subscription( &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: SubscriberId, + _res: Option>, _node_id: NodeId, - ) { - 
forget_client_subscription(tables, face, res); + ) -> Option> { + forget_client_subscription(tables, face, id) } fn get_subscriptions(&self, tables: &Tables) -> Vec> { let mut subs = HashSet::new(); for src_face in tables.faces.values() { - for sub in &face_hat!(src_face).remote_subs { + for sub in face_hat!(src_face).remote_subs.values() { subs.insert(sub.clone()); } } diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index 68f2669f6f..04b31b41ef 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -23,10 +23,12 @@ use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; +use std::sync::atomic::Ordering; use std::sync::Arc; use zenoh_buffers::ZBuf; use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; use zenoh_protocol::core::key_expr::OwnedKeyExpr; +use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ core::{WhatAmI, WireExpr}, network::declare::{ @@ -83,16 +85,19 @@ fn propagate_simple_queryable( let faces = tables.faces.values().cloned(); for mut dst_face in faces { let info = local_qabl_info(tables, res, &dst_face); - let current_info = face_hat!(dst_face).local_qabls.get(res); + let current = face_hat!(dst_face).local_qabls.get(res); if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) - && (current_info.is_none() || *current_info.unwrap() != info) + && (current.is_none() || current.unwrap().1 != info) && (src_face.is_none() || src_face.as_ref().unwrap().whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client) { + let id = current + .map(|c| c.0) + .unwrap_or(face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst)); face_hat_mut!(&mut dst_face) .local_qabls - .insert(res.clone(), info); + .insert(res.clone(), (id, info)); let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -100,7 +105,7 @@ fn propagate_simple_queryable( ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id, wire_expr: key_expr, ext_info: info, }), @@ -114,13 +119,13 @@ fn propagate_simple_queryable( fn register_client_queryable( _tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfo, ) { // Register queryable { let res = get_mut_unchecked(res); - log::debug!("Register queryable {} (face: {})", res.expr(), face,); get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { Arc::new(SessionContext { face: face.clone(), @@ -135,16 +140,17 @@ fn register_client_queryable( })) .qabl = Some(*qabl_info); } - face_hat_mut!(face).remote_qabls.insert(res.clone()); + face_hat_mut!(face).remote_qabls.insert(id, res.clone()); } fn declare_client_queryable( tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfo, ) { - register_client_queryable(tables, face, res, qabl_info); + register_client_queryable(tables, face, id, res, qabl_info); propagate_simple_queryable(tables, res, Some(face)); } @@ -164,22 +170,19 @@ fn client_qabls(res: &Arc) -> Vec> { fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { for face in tables.faces.values_mut() { - if face_hat!(face).local_qabls.contains_key(res) { - let wire_expr = Resource::get_best_key(res, "", 
face.id); + if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }), }, res.expr(), )); - - face_hat_mut!(face).local_qabls.remove(res); } } } @@ -189,38 +192,37 @@ pub(super) fn undeclare_client_queryable( face: &mut Arc, res: &mut Arc, ) { - log::debug!("Unregister client queryable {} for {}", res.expr(), face); - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).qabl = None; - if ctx.qabl.is_none() { - face_hat_mut!(face).remote_qabls.remove(res); + if !face_hat_mut!(face) + .remote_qabls + .values() + .any(|s| *s == *res) + { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).qabl = None; } - } - let mut client_qabls = client_qabls(res); - if client_qabls.is_empty() { - propagate_forget_simple_queryable(tables, res); - } else { - propagate_simple_queryable(tables, res, None); - } - if client_qabls.len() == 1 { - let face = &mut client_qabls[0]; - if face_hat!(face).local_qabls.contains_key(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); - - face_hat_mut!(face).local_qabls.remove(res); + let mut client_qabls = client_qabls(res); + if client_qabls.is_empty() { + propagate_forget_simple_queryable(tables, res); + } else { + propagate_simple_queryable(tables, res, None); + } + if client_qabls.len() == 1 { + let face = &mut client_qabls[0]; + if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } } } } @@ -228,9 +230,14 @@ pub(super) fn undeclare_client_queryable( fn forget_client_queryable( tables: &mut Tables, face: &mut Arc, - res: &mut Arc, -) { - undeclare_client_queryable(tables, face, res); + id: QueryableId, +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_qabls.remove(&id) { + undeclare_client_queryable(tables, face, &mut res); + Some(res) + } else { + None + } } pub(super) fn queries_new_face(tables: &mut Tables, _face: &mut Arc) { @@ -240,7 +247,7 @@ pub(super) fn queries_new_face(tables: &mut Tables, _face: &mut Arc) .cloned() .collect::>>() { - for qabl in face_hat!(face).remote_qabls.iter() { + for qabl in face_hat!(face).remote_qabls.values() { propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); } } @@ -255,27 +262,29 @@ impl HatQueriesTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfo, _node_id: NodeId, ) { - declare_client_queryable(tables, face, res, qabl_info); + declare_client_queryable(tables, face, id, res, qabl_info); } fn 
undeclare_queryable( &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: QueryableId, + _res: Option>, _node_id: NodeId, - ) { - forget_client_queryable(tables, face, res); + ) -> Option> { + forget_client_queryable(tables, face, id) } fn get_queryables(&self, tables: &Tables) -> Vec> { let mut qabls = HashSet::new(); for src_face in tables.faces.values() { - for qabl in &face_hat!(src_face).remote_qabls { + for qabl in face_hat!(src_face).remote_qabls.values() { qabls.insert(qabl.clone()); } } diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 24c837e8f5..ff576ae271 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -52,12 +52,16 @@ use std::{ any::Any, collections::{hash_map::DefaultHasher, HashMap, HashSet}, hash::Hasher, - sync::Arc, + sync::{atomic::AtomicU32, Arc}, }; use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, ZenohId}; use zenoh_protocol::{ common::ZExtBody, - network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE, Oam}, + network::{ + declare::{queryable::ext::QueryableInfo, QueryableId, SubscriberId}, + oam::id::OAM_LINKSTATE, + Oam, + }, }; use zenoh_result::ZResult; use zenoh_sync::get_mut_unchecked; @@ -232,14 +236,12 @@ impl HatTables { .as_ref() .map(|net| { let links = net.get_links(peer1); - log::debug!("failover_brokering {} {} ({:?})", peer1, peer2, links); HatTables::failover_brokering_to(links, peer2) }) .unwrap_or(false) } fn schedule_compute_trees(&mut self, tables_ref: Arc, net_type: WhatAmI) { - log::trace!("Schedule computations"); if (net_type == WhatAmI::Router && self.routers_trees_task.is_none()) || (net_type == WhatAmI::Peer && self.peers_trees_task.is_none()) { @@ -264,7 +266,6 @@ impl HatTables { pubsub::pubsub_tree_change(&mut tables, &new_childs, net_type); queries::queries_tree_change(&mut tables, &new_childs, net_type); - log::trace!("Computations completed"); match net_type { WhatAmI::Router => hat_mut!(tables).routers_trees_task = None, _ => hat_mut!(tables).peers_trees_task = None, @@ -418,7 +419,7 @@ impl HatBaseTrait for HatCode { face.local_mappings.clear(); let mut subs_matches = vec![]; - for mut res in face + for (_id, mut res) in face .hat .downcast_mut::() .unwrap() @@ -446,7 +447,7 @@ impl HatBaseTrait for HatCode { } let mut qabls_matches = vec![]; - for mut res in face + for (_, mut res) in face .hat .downcast_mut::() .unwrap() @@ -773,20 +774,22 @@ impl HatContext { struct HatFace { link_id: usize, - local_subs: HashSet>, - remote_subs: HashSet>, - local_qabls: HashMap, QueryableInfo>, - remote_qabls: HashSet>, + next_id: AtomicU32, // @TODO: manage rollover and uniqueness + local_subs: HashMap, SubscriberId>, + remote_subs: HashMap>, + local_qabls: HashMap, (QueryableId, QueryableInfo)>, + remote_qabls: HashMap>, } impl HatFace { fn new() -> Self { Self { link_id: 0, - local_subs: HashSet::new(), - remote_subs: HashSet::new(), + next_id: AtomicU32::new(0), + local_subs: HashMap::new(), + remote_subs: HashMap::new(), local_qabls: HashMap::new(), - remote_qabls: HashSet::new(), + remote_qabls: HashMap::new(), } } } diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index 6030269cfa..da1ca66efd 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -25,8 +25,10 @@ use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use petgraph::graph::NodeIndex; use 
std::borrow::Cow; use std::collections::{HashMap, HashSet}; +use std::sync::atomic::Ordering; use std::sync::Arc; use zenoh_protocol::core::key_expr::OwnedKeyExpr; +use zenoh_protocol::network::declare::SubscriberId; use zenoh_protocol::{ core::{Reliability, WhatAmI, ZenohId}, network::declare::{ @@ -53,8 +55,6 @@ fn send_sourced_subscription_to_net_childs( if src_face.is_none() || someface.id != src_face.unwrap().id { let key_expr = Resource::decl_key(res, &mut someface); - log::debug!("Send subscription {} on {}", res.expr(), someface); - someface.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, @@ -63,7 +63,7 @@ fn send_sourced_subscription_to_net_childs( node_id: routing_context, }, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id: 0, // Sourced subscriptions do not use ids wire_expr: key_expr, ext_info: *sub_info, }), @@ -89,7 +89,7 @@ fn propagate_simple_subscription_to( ) { if (src_face.id != dst_face.id || (dst_face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS))) - && !face_hat!(dst_face).local_subs.contains(res) + && !face_hat!(dst_face).local_subs.contains_key(res) && if full_peer_net { dst_face.whatami == WhatAmI::Client } else { @@ -99,7 +99,8 @@ fn propagate_simple_subscription_to( || hat!(tables).failover_brokering(src_face.zid, dst_face.zid)) } { - face_hat_mut!(dst_face).local_subs.insert(res.clone()); + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -107,7 +108,7 @@ fn propagate_simple_subscription_to( ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id, wire_expr: key_expr, ext_info: *sub_info, }), @@ -189,11 +190,6 @@ fn register_router_subscription( if !res_hat!(res).router_subs.contains(&router) { // Register router subscription { - log::debug!( - "Register router subscription {} (router: {})", - res.expr(), - router - ); res_hat_mut!(res).router_subs.insert(router); hat_mut!(tables).router_subs.insert(res.clone()); } @@ -230,7 +226,6 @@ fn register_peer_subscription( if !res_hat!(res).peer_subs.contains(&peer) { // Register peer subscription { - log::debug!("Register peer subscription {} (peer: {})", res.expr(), peer); res_hat_mut!(res).peer_subs.insert(peer); hat_mut!(tables).peer_subs.insert(res.clone()); } @@ -257,13 +252,13 @@ fn declare_peer_subscription( fn register_client_subscription( _tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { // Register subscription { let res = get_mut_unchecked(res); - log::debug!("Register subscription {} for {}", res.expr(), face); match res.session_ctxs.get_mut(&face.id) { Some(ctx) => match &ctx.subs { Some(info) => { @@ -292,16 +287,17 @@ fn register_client_subscription( } } } - face_hat_mut!(face).remote_subs.insert(res.clone()); + face_hat_mut!(face).remote_subs.insert(id, res.clone()); } fn declare_client_subscription( tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { - register_client_subscription(tables, face, res, sub_info); + register_client_subscription(tables, face, id, res, sub_info); let mut propa_sub_info = *sub_info; propa_sub_info.mode = Mode::Push; let zid = 
tables.zid; @@ -356,8 +352,6 @@ fn send_forget_sourced_subscription_to_net_childs( if src_face.is_none() || someface.id != src_face.unwrap().id { let wire_expr = Resource::decl_key(res, &mut someface); - log::debug!("Send forget subscription {} on {}", res.expr(), someface); - someface.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, @@ -366,7 +360,7 @@ fn send_forget_sourced_subscription_to_net_childs( node_id: routing_context.unwrap_or(0), }, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id: 0, // Sourced subscriptions do not use ids ext_wire_expr: WireExprType { wire_expr }, }), }, @@ -382,21 +376,19 @@ fn send_forget_sourced_subscription_to_net_childs( fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { for face in tables.faces.values_mut() { - if face_hat!(face).local_subs.contains(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); + if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }), }, res.expr(), )); - face_hat_mut!(face).local_subs.remove(res); } } } @@ -413,7 +405,7 @@ fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc< .collect::>>() { if face.whatami == WhatAmI::Peer - && face_hat!(face).local_subs.contains(res) + && face_hat!(face).local_subs.contains_key(res) && !res.session_ctxs.values().any(|s| { face.zid != s.face.zid && s.subs.is_some() @@ -422,21 +414,20 @@ fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc< && hat!(tables).failover_brokering(s.face.zid, face.zid))) }) { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); - - face_hat_mut!(&mut face).local_subs.remove(res); + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } } } } @@ -479,11 +470,6 @@ fn propagate_forget_sourced_subscription( } fn unregister_router_subscription(tables: &mut Tables, res: &mut Arc, router: &ZenohId) { - log::debug!( - "Unregister router subscription {} (router: {})", - res.expr(), - router - ); res_hat_mut!(res).router_subs.retain(|sub| sub != router); if res_hat!(res).router_subs.is_empty() { @@ -522,11 +508,6 @@ fn forget_router_subscription( } fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { - log::debug!( - "Unregister peer subscription {} (peer: {})", - res.expr(), - peer - ); res_hat_mut!(res).peer_subs.retain(|sub| sub != peer); if res_hat!(res).peer_subs.is_empty() { @@ -568,40 +549,37 @@ pub(super) fn 
undeclare_client_subscription( face: &mut Arc, res: &mut Arc, ) { - log::debug!("Unregister client subscription {} for {}", res.expr(), face); - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).subs = None; - } - face_hat_mut!(face).remote_subs.remove(res); - - let mut client_subs = client_subs(res); - let router_subs = remote_router_subs(tables, res); - let peer_subs = remote_peer_subs(tables, res); - if client_subs.is_empty() && !peer_subs { - undeclare_router_subscription(tables, None, res, &tables.zid.clone()); - } else { - propagate_forget_simple_subscription_to_peers(tables, res); - } - if client_subs.len() == 1 && !router_subs && !peer_subs { - let face = &mut client_subs[0]; - if face_hat!(face).local_subs.contains(res) - && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) - { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); + if !face_hat_mut!(face).remote_subs.values().any(|s| *s == *res) { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).subs = None; + } - face_hat_mut!(face).local_subs.remove(res); + let mut client_subs = client_subs(res); + let router_subs = remote_router_subs(tables, res); + let peer_subs = remote_peer_subs(tables, res); + if client_subs.is_empty() && !peer_subs { + undeclare_router_subscription(tables, None, res, &tables.zid.clone()); + } else { + propagate_forget_simple_subscription_to_peers(tables, res); + } + if client_subs.len() == 1 && !router_subs && !peer_subs { + let face = &mut client_subs[0]; + if !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { + if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } } } } @@ -609,9 +587,14 @@ pub(super) fn undeclare_client_subscription( fn forget_client_subscription( tables: &mut Tables, face: &mut Arc, - res: &mut Arc, -) { - undeclare_client_subscription(tables, face, res); + id: SubscriberId, +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_subs.remove(&id) { + undeclare_client_subscription(tables, face, &mut res); + Some(res) + } else { + None + } } pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { @@ -622,7 +605,8 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { if face.whatami == WhatAmI::Client { for sub in &hat!(tables).router_subs { - face_hat_mut!(face).local_subs.insert(sub.clone()); + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -630,7 +614,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: 
DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id, wire_expr: key_expr, ext_info: sub_info, }), @@ -649,7 +633,8 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { && hat!(tables).failover_brokering(s.face.zid, face.zid))) })) { - face_hat_mut!(face).local_subs.insert(sub.clone()); + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -657,7 +642,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id, wire_expr: key_expr, ext_info: sub_info, }), @@ -760,7 +745,7 @@ pub(super) fn pubsub_tree_change( pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { if let Some(src_face) = tables.get_face(zid).cloned() { if hat!(tables).router_peers_failover_brokering && src_face.whatami == WhatAmI::Peer { - for res in &face_hat!(src_face).remote_subs { + for res in face_hat!(src_face).remote_subs.values() { let client_subs = res .session_ctxs .values() @@ -772,7 +757,7 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: { let dst_face = &mut get_mut_unchecked(ctx).face; if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { - if face_hat!(dst_face).local_subs.contains(res) { + if let Some(id) = face_hat!(dst_face).local_subs.get(res).cloned() { let forget = !HatTables::failover_brokering_to(links, dst_face.zid) && { let ctx_links = hat!(tables) @@ -790,7 +775,6 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: }) }; if forget { - let wire_expr = Resource::get_best_key(res, "", dst_face.id); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, @@ -798,8 +782,8 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber( UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }, ), }, @@ -810,7 +794,8 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: } } else if HatTables::failover_brokering_to(links, ctx.face.zid) { let dst_face = &mut get_mut_unchecked(ctx).face; - face_hat_mut!(dst_face).local_subs.insert(res.clone()); + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); let key_expr = Resource::decl_key(res, dst_face); let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers @@ -822,7 +807,7 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id, wire_expr: key_expr, ext_info: sub_info, }), @@ -876,6 +861,7 @@ impl HatPubSubTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, node_id: NodeId, @@ -892,10 +878,10 @@ impl 
HatPubSubTrait for HatCode { declare_peer_subscription(tables, face, res, sub_info, peer) } } else { - declare_client_subscription(tables, face, res, sub_info) + declare_client_subscription(tables, face, id, res, sub_info) } } - _ => declare_client_subscription(tables, face, res, sub_info), + _ => declare_client_subscription(tables, face, id, res, sub_info), } } @@ -903,25 +889,40 @@ impl HatPubSubTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: SubscriberId, + res: Option>, node_id: NodeId, - ) { + ) -> Option> { match face.whatami { WhatAmI::Router => { - if let Some(router) = get_router(tables, face, node_id) { - forget_router_subscription(tables, face, res, &router) + if let Some(mut res) = res { + if let Some(router) = get_router(tables, face, node_id) { + forget_router_subscription(tables, face, &mut res, &router); + Some(res) + } else { + None + } + } else { + None } } WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { - if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_subscription(tables, face, res, &peer) + if let Some(mut res) = res { + if let Some(peer) = get_peer(tables, face, node_id) { + forget_peer_subscription(tables, face, &mut res, &peer); + Some(res) + } else { + None + } + } else { + None } } else { - forget_client_subscription(tables, face, res) + forget_client_subscription(tables, face, id) } } - _ => forget_client_subscription(tables, face, res), + _ => forget_client_subscription(tables, face, id), } } diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 008e71d7af..b76f0adcc6 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -26,10 +26,12 @@ use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; use std::borrow::Cow; use std::collections::HashMap; +use std::sync::atomic::Ordering; use std::sync::Arc; use zenoh_buffers::ZBuf; use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; use zenoh_protocol::core::key_expr::OwnedKeyExpr; +use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ core::{WhatAmI, WireExpr, ZenohId}, network::declare::{ @@ -204,8 +206,6 @@ fn send_sourced_queryable_to_net_childs( if src_face.is_none() || someface.id != src_face.as_ref().unwrap().id { let key_expr = Resource::decl_key(res, &mut someface); - log::debug!("Send queryable {} on {}", res.expr(), someface); - someface.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, @@ -214,7 +214,7 @@ fn send_sourced_queryable_to_net_childs( node_id: routing_context, }, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id: 0, // Sourced queryables do not use ids wire_expr: key_expr, ext_info: *qabl_info, }), @@ -238,9 +238,9 @@ fn propagate_simple_queryable( let faces = tables.faces.values().cloned(); for mut dst_face in faces { let info = local_qabl_info(tables, res, &dst_face); - let current_info = face_hat!(dst_face).local_qabls.get(res); + let current = face_hat!(dst_face).local_qabls.get(res); if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) - && (current_info.is_none() || *current_info.unwrap() != info) + && (current.is_none() || current.unwrap().1 != info) && if full_peers_net { dst_face.whatami == WhatAmI::Client } else { @@ -252,9 +252,12 @@ fn propagate_simple_queryable( .failover_brokering(src_face.as_ref().unwrap().zid, dst_face.zid)) } { + let 
id = current + .map(|c| c.0) + .unwrap_or(face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst)); face_hat_mut!(&mut dst_face) .local_qabls - .insert(res.clone(), info); + .insert(res.clone(), (id, info)); let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -262,7 +265,7 @@ fn propagate_simple_queryable( ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id, wire_expr: key_expr, ext_info: info, }), @@ -322,11 +325,6 @@ fn register_router_queryable( if current_info.is_none() || current_info.unwrap() != qabl_info { // Register router queryable { - log::debug!( - "Register router queryable {} (router: {})", - res.expr(), - router, - ); res_hat_mut!(res).router_qabls.insert(router, *qabl_info); hat_mut!(tables).router_qabls.insert(res.clone()); } @@ -375,7 +373,6 @@ fn register_peer_queryable( if current_info.is_none() || current_info.unwrap() != qabl_info { // Register peer queryable { - log::debug!("Register peer queryable {} (peer: {})", res.expr(), peer,); res_hat_mut!(res).peer_qabls.insert(peer, *qabl_info); hat_mut!(tables).peer_qabls.insert(res.clone()); } @@ -402,13 +399,13 @@ fn declare_peer_queryable( fn register_client_queryable( _tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfo, ) { // Register queryable { let res = get_mut_unchecked(res); - log::debug!("Register queryable {} (face: {})", res.expr(), face,); get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { Arc::new(SessionContext { face: face.clone(), @@ -423,16 +420,17 @@ fn register_client_queryable( })) .qabl = Some(*qabl_info); } - face_hat_mut!(face).remote_qabls.insert(res.clone()); + face_hat_mut!(face).remote_qabls.insert(id, res.clone()); } fn declare_client_queryable( tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, qabl_info: &QueryableInfo, ) { - register_client_queryable(tables, face, res, qabl_info); + register_client_queryable(tables, face, id, res, qabl_info); let local_details = local_router_qabl_info(tables, res); let zid = tables.zid; register_router_queryable(tables, Some(face), res, &local_details, zid); @@ -486,8 +484,6 @@ fn send_forget_sourced_queryable_to_net_childs( if src_face.is_none() || someface.id != src_face.unwrap().id { let wire_expr = Resource::decl_key(res, &mut someface); - log::debug!("Send forget queryable {} on {}", res.expr(), someface); - someface.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, @@ -496,7 +492,7 @@ fn send_forget_sourced_queryable_to_net_childs( node_id: routing_context, }, body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id: 0, // Sourced queryables do not use ids ext_wire_expr: WireExprType { wire_expr }, }), }, @@ -512,22 +508,19 @@ fn send_forget_sourced_queryable_to_net_childs( fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { for face in tables.faces.values_mut() { - if face_hat!(face).local_qabls.contains_key(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); + if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, 
// @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }), }, res.expr(), )); - - face_hat_mut!(face).local_qabls.remove(res); } } } @@ -553,21 +546,20 @@ fn propagate_forget_simple_queryable_to_peers(tables: &mut Tables, res: &mut Arc && hat!(tables).failover_brokering(s.face.zid, face.zid))) }) { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); - - face_hat_mut!(&mut face).local_qabls.remove(res); + if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } } } } @@ -610,11 +602,6 @@ fn propagate_forget_sourced_queryable( } fn unregister_router_queryable(tables: &mut Tables, res: &mut Arc, router: &ZenohId) { - log::debug!( - "Unregister router queryable {} (router: {})", - res.expr(), - router, - ); res_hat_mut!(res).router_qabls.remove(router); if res_hat!(res).router_qabls.is_empty() { @@ -653,7 +640,6 @@ fn forget_router_queryable( } fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { - log::debug!("Unregister peer queryable {} (peer: {})", res.expr(), peer,); res_hat_mut!(res).peer_qabls.remove(peer); if res_hat!(res).peer_qabls.is_empty() { @@ -699,44 +685,43 @@ pub(super) fn undeclare_client_queryable( face: &mut Arc, res: &mut Arc, ) { - log::debug!("Unregister client queryable {} for {}", res.expr(), face); - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).qabl = None; - if ctx.qabl.is_none() { - face_hat_mut!(face).remote_qabls.remove(res); + if !face_hat_mut!(face) + .remote_qabls + .values() + .any(|s| *s == *res) + { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).qabl = None; } - } - let mut client_qabls = client_qabls(res); - let router_qabls = remote_router_qabls(tables, res); - let peer_qabls = remote_peer_qabls(tables, res); + let mut client_qabls = client_qabls(res); + let router_qabls = remote_router_qabls(tables, res); + let peer_qabls = remote_peer_qabls(tables, res); - if client_qabls.is_empty() && !peer_qabls { - undeclare_router_queryable(tables, None, res, &tables.zid.clone()); - } else { - let local_info = local_router_qabl_info(tables, res); - register_router_queryable(tables, None, res, &local_info, tables.zid); - propagate_forget_simple_queryable_to_peers(tables, res); - } - - if client_qabls.len() == 1 && !router_qabls && !peer_qabls { - let face = &mut client_qabls[0]; - if face_hat!(face).local_qabls.contains_key(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { 
wire_expr }, - }), - }, - res.expr(), - )); + if client_qabls.is_empty() && !peer_qabls { + undeclare_router_queryable(tables, None, res, &tables.zid.clone()); + } else { + let local_info = local_router_qabl_info(tables, res); + register_router_queryable(tables, None, res, &local_info, tables.zid); + propagate_forget_simple_queryable_to_peers(tables, res); + } - face_hat_mut!(face).local_qabls.remove(res); + if client_qabls.len() == 1 && !router_qabls && !peer_qabls { + let face = &mut client_qabls[0]; + if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } } } } @@ -744,9 +729,14 @@ pub(super) fn undeclare_client_queryable( fn forget_client_queryable( tables: &mut Tables, face: &mut Arc, - res: &mut Arc, -) { - undeclare_client_queryable(tables, face, res); + id: QueryableId, +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_qabls.remove(&id) { + undeclare_client_queryable(tables, face, &mut res); + Some(res) + } else { + None + } } pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { @@ -754,7 +744,10 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { for qabl in hat!(tables).router_qabls.iter() { if qabl.context.is_some() { let info = local_qabl_info(tables, qabl, face); - face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -762,7 +755,7 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id, wire_expr: key_expr, ext_info: info, }), @@ -783,7 +776,10 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { })) { let info = local_qabl_info(tables, qabl, face); - face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -791,7 +787,7 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id, wire_expr: key_expr, ext_info: info, }), @@ -853,7 +849,7 @@ pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { if let Some(src_face) = tables.get_face(zid) { if hat!(tables).router_peers_failover_brokering && src_face.whatami == WhatAmI::Peer { - for res in &face_hat!(src_face).remote_qabls { + for res in face_hat!(src_face).remote_qabls.values() { let client_qabls = res .session_ctxs .values() @@ -865,7 +861,7 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links { let dst_face = &mut 
get_mut_unchecked(ctx).face;
                     if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid {
-                        if face_hat!(dst_face).local_qabls.contains_key(res) {
+                        if let Some((id, _)) = face_hat!(dst_face).local_qabls.get(res).cloned() {
                             let forget = !HatTables::failover_brokering_to(links, dst_face.zid)
                                 && {
                                     let ctx_links = hat!(tables)
@@ -883,7 +879,6 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links
                                     })
                                 };
                             if forget {
-                                let wire_expr = Resource::get_best_key(res, "", dst_face.id);
                                 dst_face.primitives.send_declare(RoutingContext::with_expr(
                                     Declare {
                                         ext_qos: ext::QoSType::DECLARE,
@@ -891,8 +886,8 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links
                                         ext_nodeid: ext::NodeIdType::DEFAULT,
                                         body: DeclareBody::UndeclareQueryable(
                                             UndeclareQueryable {
-                                                id: 0, // @TODO use proper QueryableId (#703)
-                                                ext_wire_expr: WireExprType { wire_expr },
+                                                id,
+                                                ext_wire_expr: WireExprType::null(),
                                             },
                                         ),
                                     },
@@ -904,9 +899,10 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links
                         } else if HatTables::failover_brokering_to(links, ctx.face.zid) {
                             let dst_face = &mut get_mut_unchecked(ctx).face;
                             let info = local_qabl_info(tables, res, dst_face);
+                            let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst);
                             face_hat_mut!(dst_face)
                                 .local_qabls
-                                .insert(res.clone(), info);
+                                .insert(res.clone(), (id, info));
                             let key_expr = Resource::decl_key(res, dst_face);
                             dst_face.primitives.send_declare(RoutingContext::with_expr(
                                 Declare {
@@ -914,7 +910,7 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links
                                     ext_tstamp: None,
                                     ext_nodeid: ext::NodeIdType::DEFAULT,
                                     body: DeclareBody::DeclareQueryable(DeclareQueryable {
-                                        id: 0, // @TODO use proper QueryableId (#703)
+                                        id,
                                         wire_expr: key_expr,
                                         ext_info: info,
                                     }),
@@ -1024,6 +1020,7 @@ impl HatQueriesTrait for HatCode {
         &self,
         tables: &mut Tables,
         face: &mut Arc<FaceState>,
+        id: QueryableId,
         res: &mut Arc<Resource>,
         qabl_info: &QueryableInfo,
         node_id: NodeId,
@@ -1040,10 +1037,10 @@ impl HatQueriesTrait for HatCode {
                        declare_peer_queryable(tables, face, res, qabl_info, peer)
                    }
                } else {
-                    declare_client_queryable(tables, face, res, qabl_info)
+                    declare_client_queryable(tables, face, id, res, qabl_info)
                }
            }
-            _ => declare_client_queryable(tables, face, res, qabl_info),
+            _ => declare_client_queryable(tables, face, id, res, qabl_info),
        }
    }
 
@@ -1051,25 +1048,40 @@ impl HatQueriesTrait for HatCode {
        &self,
        tables: &mut Tables,
        face: &mut Arc<FaceState>,
-        res: &mut Arc<Resource>,
+        id: QueryableId,
+        res: Option<Arc<Resource>>,
        node_id: NodeId,
-    ) {
+    ) -> Option<Arc<Resource>> {
        match face.whatami {
            WhatAmI::Router => {
-                if let Some(router) = get_router(tables, face, node_id) {
-                    forget_router_queryable(tables, face, res, &router)
+                if let Some(mut res) = res {
+                    if let Some(router) = get_router(tables, face, node_id) {
+                        forget_router_queryable(tables, face, &mut res, &router);
+                        Some(res)
+                    } else {
+                        None
+                    }
+                } else {
+                    None
                }
            }
            WhatAmI::Peer => {
                if hat!(tables).full_net(WhatAmI::Peer) {
-                    if let Some(peer) = get_peer(tables, face, node_id) {
-                        forget_peer_queryable(tables, face, res, &peer)
+                    if let Some(mut res) = res {
+                        if let Some(peer) = get_peer(tables, face, node_id) {
+                            forget_peer_queryable(tables, face, &mut res, &peer);
+                            Some(res)
+                        } else {
+                            None
+                        }
+                    } else {
+                        None
                    }
                } else {
-                    forget_client_queryable(tables, face, res)
+                    forget_client_queryable(tables, face, id)
                }
            }
-            _ => forget_client_queryable(tables, face, res),
+            _ => forget_client_queryable(tables, face, id),
        }
    }
 
diff --git 
a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 03b447aae0..e76475f447 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -32,6 +32,7 @@ use std::sync::Mutex; use zenoh_buffers::buffer::SplitBuffer; use zenoh_config::{ConfigValidator, ValidatedMap, WhatAmI}; use zenoh_plugin_trait::{PluginControl, PluginStatus}; +use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ core::{ key_expr::{keyexpr, OwnedKeyExpr}, @@ -59,6 +60,7 @@ type Handler = Arc; pub struct AdminSpace { zid: ZenohId, + queryable_id: QueryableId, primitives: Mutex>>, mappings: Mutex>, handlers: HashMap, @@ -189,6 +191,7 @@ impl AdminSpace { }); let admin = Arc::new(AdminSpace { zid: runtime.zid(), + queryable_id: runtime.next_id(), primitives: Mutex::new(None), mappings: Mutex::new(HashMap::new()), handlers, @@ -278,7 +281,7 @@ impl AdminSpace { ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id: runtime.next_id(), wire_expr: [&root_key, "/**"].concat().into(), ext_info: QueryableInfo { complete: 0, @@ -292,7 +295,7 @@ impl AdminSpace { ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id: runtime.next_id(), wire_expr: [&root_key, "/config/**"].concat().into(), ext_info: SubscriberInfo::DEFAULT, }), @@ -431,6 +434,7 @@ impl Primitives for AdminSpace { #[cfg(feature = "unstable")] attachment: query.ext_attachment.map(Into::into), }), + eid: self.queryable_id, }; for (key, handler) in &self.handlers { diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 7061b38622..8b116b1080 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -30,6 +30,7 @@ use async_std::task::JoinHandle; use futures::stream::StreamExt; use futures::Future; use std::any::Any; +use std::sync::atomic::{AtomicU32, Ordering}; use std::sync::Arc; use stop_token::future::FutureExt; use stop_token::{StopSource, TimedOutError}; @@ -48,6 +49,7 @@ use zenoh_transport::{ struct RuntimeState { zid: ZenohId, whatami: WhatAmI, + next_id: AtomicU32, metadata: serde_json::Value, router: Arc, config: Notifier, @@ -114,6 +116,7 @@ impl Runtime { state: Arc::new(RuntimeState { zid, whatami, + next_id: AtomicU32::new(1), // 0 is reserved for routing core metadata, router, config: config.clone(), @@ -154,6 +157,11 @@ impl Runtime { zwrite!(self.state.transport_handlers).push(handler); } + #[inline] + pub fn next_id(&self) -> u32 { + self.state.next_id.fetch_add(1, Ordering::SeqCst) + } + pub async fn close(&self) -> ZResult<()> { log::trace!("Runtime::close())"); drop(self.state.stop_source.write().unwrap().take()); diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index 80a9dd458a..4560eefaae 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -66,6 +66,7 @@ fn base_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face.upgrade().unwrap(), + 0, &WireExpr::from(1).with_suffix("four/five"), &sub_info, NodeId::default(), @@ -166,6 +167,76 @@ fn match_test() { } } +#[test] +fn multisub_test() { + let config = Config::default(); + let router = Router::new( + ZenohId::try_from([1]).unwrap(), + WhatAmI::Client, + Some(Arc::new(HLC::default())), + &config, + ) + .unwrap(); + let tables = router.tables.clone(); + + let primitives = 
Arc::new(DummyPrimitives {}); + let face0 = Arc::downgrade(&router.new_primitives(primitives).state); + assert!(face0.upgrade().is_some()); + + // -------------- + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, + mode: Mode::Push, + }; + declare_subscription( + zlock!(tables.ctrl_lock).as_ref(), + &tables, + &mut face0.upgrade().unwrap(), + 0, + &"sub".into(), + &sub_info, + NodeId::default(), + ); + let optres = Resource::get_resource(zread!(tables.tables)._get_root(), "sub") + .map(|res| Arc::downgrade(&res)); + assert!(optres.is_some()); + let res = optres.unwrap(); + assert!(res.upgrade().is_some()); + + declare_subscription( + zlock!(tables.ctrl_lock).as_ref(), + &tables, + &mut face0.upgrade().unwrap(), + 1, + &"sub".into(), + &sub_info, + NodeId::default(), + ); + assert!(res.upgrade().is_some()); + + undeclare_subscription( + zlock!(tables.ctrl_lock).as_ref(), + &tables, + &mut face0.upgrade().unwrap(), + 0, + &WireExpr::empty(), + NodeId::default(), + ); + assert!(res.upgrade().is_some()); + + undeclare_subscription( + zlock!(tables.ctrl_lock).as_ref(), + &tables, + &mut face0.upgrade().unwrap(), + 1, + &WireExpr::empty(), + NodeId::default(), + ); + assert!(res.upgrade().is_none()); + + tables::close_face(&tables, &face0); +} + #[test] fn clean_test() { let config = Config::default(); @@ -241,6 +312,7 @@ fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), + 0, &"todrop1/todrop11".into(), &sub_info, NodeId::default(), @@ -255,6 +327,7 @@ fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), + 1, &WireExpr::from(1).with_suffix("/todrop12"), &sub_info, NodeId::default(), @@ -270,7 +343,8 @@ fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), - &WireExpr::from(1).with_suffix("/todrop12"), + 1, + &WireExpr::empty(), NodeId::default(), ); @@ -284,7 +358,8 @@ fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), - &"todrop1/todrop11".into(), + 0, + &WireExpr::empty(), NodeId::default(), ); assert!(res1.upgrade().is_some()); @@ -302,6 +377,7 @@ fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), + 2, &"todrop3".into(), &sub_info, NodeId::default(), @@ -316,7 +392,8 @@ fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), - &"todrop3".into(), + 2, + &WireExpr::empty(), NodeId::default(), ); assert!(res1.upgrade().is_some()); @@ -331,6 +408,7 @@ fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), + 3, &"todrop5".into(), &sub_info, NodeId::default(), @@ -339,6 +417,7 @@ fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), + 4, &"todrop6".into(), &sub_info, NodeId::default(), @@ -518,6 +597,7 @@ fn client_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), + 0, &WireExpr::from(11).with_suffix("/**"), &sub_info, NodeId::default(), @@ -565,6 +645,7 @@ fn client_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face1.upgrade().unwrap(), + 0, &WireExpr::from(21).with_suffix("/**"), &sub_info, NodeId::default(), @@ -612,6 +693,7 @@ fn client_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face2.upgrade().unwrap(), + 0, &WireExpr::from(31).with_suffix("/**"), &sub_info, NodeId::default(), diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 59a4bbd96e..177906e9b1 100644 --- a/zenoh/src/prelude.rs +++ 
b/zenoh/src/prelude.rs @@ -31,7 +31,10 @@ pub(crate) mod common { writer::HasWriter, }; pub use zenoh_core::Resolve; + pub use zenoh_protocol::core::{EndPoint, Locator, ZenohId}; + #[zenoh_macros::unstable] + pub use zenoh_protocol::core::{EntityGlobalId, EntityId}; pub use crate::config::{self, Config, ValidatedMap}; pub use crate::handlers::IntoCallbackReceiverPair; @@ -49,6 +52,8 @@ pub(crate) mod common { pub use crate::sample::Locality; #[cfg(not(feature = "unstable"))] pub(crate) use crate::sample::Locality; + #[zenoh_macros::unstable] + pub use crate::sample::SourceInfo; pub use crate::sample::{Sample, SampleKind}; pub use crate::publication::Priority; diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 9fb4bdf6c3..2a1a58ebd9 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -13,14 +13,11 @@ // //! Publishing primitives. -use crate::encoding::Encoding; -use crate::key_expr::KeyExpr; use crate::net::primitives::Primitives; -use crate::payload::Payload; +use crate::prelude::*; #[zenoh_macros::unstable] use crate::sample::Attachment; use crate::sample::{DataInfo, QoS, Sample, SampleKind}; -use crate::Locality; use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] @@ -30,10 +27,11 @@ use crate::{ }; use std::future::Ready; use zenoh_core::{zread, AsyncResolve, Resolvable, Resolve, SyncResolve}; -use zenoh_keyexpr::keyexpr; use zenoh_protocol::network::push::ext; use zenoh_protocol::network::Mapping; use zenoh_protocol::network::Push; +#[zenoh_macros::unstable] +use zenoh_protocol::zenoh::ext::SourceInfoType; use zenoh_protocol::zenoh::Del; use zenoh_protocol::zenoh::PushBody; use zenoh_protocol::zenoh::Put; @@ -148,6 +146,8 @@ impl SyncResolve for PutBuilder<'_, '_> { let publisher = Publisher { session, + #[cfg(feature = "unstable")] + eid: 0, // This is a one shot Publisher key_expr: key_expr?, congestion_control, priority, @@ -160,6 +160,8 @@ impl SyncResolve for PutBuilder<'_, '_> { self.kind, self.encoding, #[cfg(feature = "unstable")] + None, + #[cfg(feature = "unstable")] self.attachment, ) } @@ -241,6 +243,8 @@ impl std::fmt::Debug for PublisherRef<'_> { #[derive(Debug, Clone)] pub struct Publisher<'a> { pub(crate) session: SessionRef<'a>, + #[cfg(feature = "unstable")] + pub(crate) eid: EntityId, pub(crate) key_expr: KeyExpr<'a>, pub(crate) congestion_control: CongestionControl, pub(crate) priority: Priority, @@ -248,6 +252,29 @@ pub struct Publisher<'a> { } impl<'a> Publisher<'a> { + /// Returns the [`EntityGlobalId`] of this Publisher. 
+ /// + /// # Examples + /// ``` + /// # async_std::task::block_on(async { + /// use zenoh::prelude::r#async::*; + /// + /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let publisher = session.declare_publisher("key/expression") + /// .res() + /// .await + /// .unwrap(); + /// let publisher_id = publisher.id(); + /// # }) + /// ``` + #[zenoh_macros::unstable] + pub fn id(&self) -> EntityGlobalId { + EntityGlobalId { + zid: self.session.zid(), + eid: self.eid, + } + } + pub fn key_expr(&self) -> &KeyExpr<'a> { &self.key_expr } @@ -317,6 +344,8 @@ impl<'a> Publisher<'a> { kind, encoding: Encoding::ZENOH_BYTES, #[cfg(feature = "unstable")] + source_info: None, + #[cfg(feature = "unstable")] attachment: None, } } @@ -604,6 +633,8 @@ pub struct Publication<'a> { kind: SampleKind, encoding: Encoding, #[cfg(feature = "unstable")] + pub(crate) source_info: Option, + #[cfg(feature = "unstable")] pub(crate) attachment: Option, } @@ -618,6 +649,27 @@ impl<'a> Publication<'a> { self.attachment = Some(attachment); self } + + /// Send data with the given [`SourceInfo`]. + /// + /// # Examples + /// ``` + /// # async_std::task::block_on(async { + /// use zenoh::prelude::r#async::*; + /// + /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); + /// publisher.put("Value").with_source_info(SourceInfo { + /// source_id: Some(publisher.id()), + /// source_sn: Some(0), + /// }).res().await.unwrap(); + /// # }) + /// ``` + #[zenoh_macros::unstable] + pub fn with_source_info(mut self, source_info: SourceInfo) -> Self { + self.source_info = Some(source_info); + self + } } impl Resolvable for Publication<'_> { @@ -632,6 +684,8 @@ impl SyncResolve for Publication<'_> { self.kind, self.encoding, #[cfg(feature = "unstable")] + self.source_info, + #[cfg(feature = "unstable")] self.attachment, ) } @@ -661,6 +715,8 @@ impl<'a> Sink for Publisher<'a> { kind: item.kind, encoding: item.encoding, #[cfg(feature = "unstable")] + source_info: None, + #[cfg(feature = "unstable")] attachment: item.attachment, } .res_sync() @@ -784,8 +840,12 @@ impl<'a, 'b> SyncResolve for PublisherBuilder<'a, 'b> { self.session .declare_publication_intent(key_expr.clone()) .res_sync()?; + #[cfg(feature = "unstable")] + let eid = self.session.runtime.next_id(); let publisher = Publisher { session: self.session, + #[cfg(feature = "unstable")] + eid, key_expr, congestion_control: self.congestion_control, priority: self.priority, @@ -809,6 +869,7 @@ fn resolve_put( payload: Payload, kind: SampleKind, encoding: Encoding, + #[cfg(feature = "unstable")] source_info: Option, #[cfg(feature = "unstable")] attachment: Option, ) -> ZResult<()> { log::trace!("write({:?}, [...])", &publisher.key_expr); @@ -842,6 +903,12 @@ fn resolve_put( PushBody::Put(Put { timestamp, encoding: encoding.clone().into(), + #[cfg(feature = "unstable")] + ext_sinfo: source_info.map(|s| SourceInfoType { + id: s.source_id.unwrap_or_default(), + sn: s.source_sn.unwrap_or_default() as u32, + }), + #[cfg(not(feature = "unstable"))] ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -861,6 +928,12 @@ fn resolve_put( } PushBody::Del(Del { timestamp, + #[cfg(feature = "unstable")] + ext_sinfo: source_info.map(|s| SourceInfoType { + id: s.source_id.unwrap_or_default(), + sn: s.source_sn.unwrap_or_default() as u32, + }), + #[cfg(not(feature = "unstable"))] ext_sinfo: None, ext_attachment, ext_unknown: vec![], diff --git a/zenoh/src/queryable.rs 
b/zenoh/src/queryable.rs index 6bd78d4fc7..bd5ec81101 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -17,7 +17,6 @@ use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; -use crate::sample::DataInfo; use crate::Id; use crate::SessionRef; use crate::Undeclarable; @@ -28,11 +27,9 @@ use std::future::Ready; use std::ops::Deref; use std::sync::Arc; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; -use zenoh_protocol::{ - core::WireExpr, - network::{response, Mapping, RequestId, Response, ResponseFinal}, - zenoh::{self, ext::ValueType, reply::ReplyBody, Del, Put, ResponseBody}, -}; +use zenoh_protocol::core::{EntityId, WireExpr}; +use zenoh_protocol::network::{response, Mapping, RequestId, Response, ResponseFinal}; +use zenoh_protocol::zenoh::{self, ext::ValueType, reply::ReplyBody, Del, Put, ResponseBody}; use zenoh_result::ZResult; pub(crate) struct QueryInner { @@ -64,6 +61,7 @@ impl Drop for QueryInner { #[derive(Clone)] pub struct Query { pub(crate) inner: Arc, + pub(crate) eid: EntityId, } impl Query { @@ -192,22 +190,12 @@ impl SyncResolve for ReplyBuilder<'_> { kind, encoding, timestamp, - qos, #[cfg(feature = "unstable")] source_info, #[cfg(feature = "unstable")] attachment, + .. } = sample; - #[allow(unused_mut)] - let mut data_info = DataInfo { - kind, - encoding: Some(encoding), - timestamp, - qos, - source_id: None, - source_sn: None, - }; - // Use a macro for inferring the proper const extension ID between Put and Del cases macro_rules! ext_attachment { () => {{ @@ -222,21 +210,17 @@ impl SyncResolve for ReplyBuilder<'_> { ext_attachment }}; } - + #[allow(unused_mut)] + let mut ext_sinfo = None; #[cfg(feature = "unstable")] { - data_info.source_id = source_info.source_id; - data_info.source_sn = source_info.source_sn; + if source_info.source_id.is_some() || source_info.source_sn.is_some() { + ext_sinfo = Some(zenoh::put::ext::SourceInfoType { + id: source_info.source_id.unwrap_or_default(), + sn: source_info.source_sn.unwrap_or_default() as u32, + }) + } } - let ext_sinfo = if data_info.source_id.is_some() || data_info.source_sn.is_some() { - Some(zenoh::put::ext::SourceInfoType { - zid: data_info.source_id.unwrap_or_default(), - eid: 0, // @TODO use proper EntityId (#703) - sn: data_info.source_sn.unwrap_or_default() as u32, - }) - } else { - None - }; self.query.inner.primitives.send_response(Response { rid: self.query.inner.qid, wire_expr: WireExpr { @@ -249,8 +233,8 @@ impl SyncResolve for ReplyBuilder<'_> { ext_unknown: vec![], payload: match kind { SampleKind::Put => ReplyBody::Put(Put { - timestamp: data_info.timestamp, - encoding: data_info.encoding.unwrap_or_default().into(), + timestamp, + encoding: encoding.into(), ext_sinfo, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -270,7 +254,7 @@ impl SyncResolve for ReplyBuilder<'_> { ext_tstamp: None, ext_respid: Some(response::ext::ResponderIdType { zid: self.query.inner.zid, - eid: 0, // @TODO use proper EntityId (#703) + eid: self.query.eid, }), }); Ok(()) @@ -300,7 +284,7 @@ impl SyncResolve for ReplyBuilder<'_> { ext_tstamp: None, ext_respid: Some(response::ext::ResponderIdType { zid: self.query.inner.zid, - eid: 0, // @TODO use proper EntityId (#703) + eid: self.query.eid, }), }); Ok(()) @@ -607,6 +591,29 @@ pub struct Queryable<'a, Receiver> { } impl<'a, Receiver> Queryable<'a, Receiver> { + /// Returns the [`EntityGlobalId`] of this Queryable. 
+ /// + /// # Examples + /// ``` + /// # async_std::task::block_on(async { + /// use zenoh::prelude::r#async::*; + /// + /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let queryable = session.declare_queryable("key/expression") + /// .res() + /// .await + /// .unwrap(); + /// let queryable_id = queryable.id(); + /// # }) + /// ``` + #[zenoh_macros::unstable] + pub fn id(&self) -> EntityGlobalId { + EntityGlobalId { + zid: self.queryable.session.zid(), + eid: self.queryable.state.id, + } + } + #[inline] pub fn undeclare(self) -> impl Resolve> + 'a { Undeclarable::undeclare_inner(self, ()) diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 543dd62e84..af4a58956d 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -15,16 +15,16 @@ //! Sample primitives use crate::encoding::Encoding; use crate::payload::Payload; -use crate::prelude::{KeyExpr, ZenohId}; +use crate::prelude::{KeyExpr, Value}; use crate::time::{new_reception_timestamp, Timestamp}; use crate::Priority; -use crate::Value; #[zenoh_macros::unstable] use serde::Serialize; use std::{ convert::{TryFrom, TryInto}, fmt, }; +use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::{core::CongestionControl, network::push::ext::QoSType}; pub type SourceSn = u64; @@ -52,7 +52,7 @@ pub(crate) struct DataInfo { pub kind: SampleKind, pub encoding: Option, pub timestamp: Option, - pub source_id: Option, + pub source_id: Option, pub source_sn: Option, pub qos: QoS, } @@ -61,16 +61,24 @@ pub(crate) struct DataInfo { #[zenoh_macros::unstable] #[derive(Debug, Clone)] pub struct SourceInfo { - /// The [`ZenohId`] of the zenoh instance that published the concerned [`Sample`]. - pub source_id: Option, + /// The [`EntityGlobalId`] of the zenoh entity that published the concerned [`Sample`]. + pub source_id: Option, /// The sequence number of the [`Sample`] from the source. pub source_sn: Option, } #[test] #[cfg(feature = "unstable")] +#[cfg(not(all(target_os = "macos", target_arch = "aarch64")))] fn source_info_stack_size() { - assert_eq!(std::mem::size_of::(), 16 * 2); + assert_eq!(std::mem::size_of::(), 40); +} + +#[test] +#[cfg(feature = "unstable")] +#[cfg(all(target_os = "macos", target_arch = "aarch64"))] +fn source_info_stack_size() { + assert_eq!(std::mem::size_of::(), 48); } #[zenoh_macros::unstable] diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 87c416c209..861acf71de 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -48,7 +48,7 @@ use std::convert::TryFrom; use std::convert::TryInto; use std::fmt; use std::ops::Deref; -use std::sync::atomic::{AtomicU16, AtomicUsize, Ordering}; +use std::sync::atomic::{AtomicU16, Ordering}; use std::sync::Arc; use std::sync::RwLock; use std::time::Duration; @@ -57,6 +57,8 @@ use zenoh_buffers::ZBuf; use zenoh_collections::SingleOrVec; use zenoh_config::unwrap_or_default; use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; +#[cfg(feature = "unstable")] +use zenoh_protocol::network::declare::SubscriberId; use zenoh_protocol::network::AtomicRequestId; use zenoh_protocol::network::RequestId; use zenoh_protocol::zenoh::reply::ReplyBody; @@ -97,9 +99,10 @@ pub(crate) struct SessionState { pub(crate) primitives: Option>, // @TODO replace with MaybeUninit ?? 
pub(crate) expr_id_counter: AtomicExprId, // @TODO: manage rollover and uniqueness pub(crate) qid_counter: AtomicRequestId, - pub(crate) decl_id_counter: AtomicUsize, pub(crate) local_resources: HashMap, pub(crate) remote_resources: HashMap, + #[cfg(feature = "unstable")] + pub(crate) remote_subscribers: HashMap>, //pub(crate) publications: Vec, pub(crate) subscribers: HashMap>, pub(crate) queryables: HashMap>, @@ -121,9 +124,10 @@ impl SessionState { primitives: None, expr_id_counter: AtomicExprId::new(1), // Note: start at 1 because 0 is reserved for NO_RESOURCE qid_counter: AtomicRequestId::new(0), - decl_id_counter: AtomicUsize::new(0), local_resources: HashMap::new(), remote_resources: HashMap::new(), + #[cfg(feature = "unstable")] + remote_subscribers: HashMap::new(), //publications: Vec::new(), subscribers: HashMap::new(), queryables: HashMap::new(), @@ -967,19 +971,20 @@ impl Session { ) -> ZResult> { let mut state = zwrite!(self.state); log::trace!("subscribe({:?})", key_expr); - let id = state.decl_id_counter.fetch_add(1, Ordering::SeqCst); + let id = self.runtime.next_id(); let key_expr = match scope { Some(scope) => scope / key_expr, None => key_expr.clone(), }; - let sub_state = Arc::new(SubscriberState { + let mut sub_state = SubscriberState { id, + remote_id: id, key_expr: key_expr.clone().into_owned(), scope: scope.clone().map(|e| e.into_owned()), origin, callback, - }); + }; #[cfg(not(feature = "unstable"))] let declared_sub = origin != Locality::SessionLocal; @@ -989,29 +994,39 @@ impl Session { .as_str() .starts_with(crate::liveliness::PREFIX_LIVELINESS); - let declared_sub = declared_sub - .then(|| { - match state - .aggregated_subscribers // TODO: can this be an OwnedKeyExpr? - .iter() - .find(|s| s.includes( &key_expr)) - { - Some(join_sub) => { - let joined_sub = state.subscribers.values().any(|s| { - s.origin != Locality::SessionLocal && join_sub.includes(&s.key_expr) - }); - (!joined_sub).then(|| join_sub.clone().into()) - } - None => { - let twin_sub = state - .subscribers - .values() - .any(|s| s.origin != Locality::SessionLocal && s.key_expr == key_expr); - (!twin_sub).then(|| key_expr.clone()) + let declared_sub = + declared_sub + .then(|| { + match state + .aggregated_subscribers + .iter() + .find(|s| s.includes(&key_expr)) + { + Some(join_sub) => { + if let Some(joined_sub) = state.subscribers.values().find(|s| { + s.origin != Locality::SessionLocal && join_sub.includes(&s.key_expr) + }) { + sub_state.remote_id = joined_sub.remote_id; + None + } else { + Some(join_sub.clone().into()) + } + } + None => { + if let Some(twin_sub) = state.subscribers.values().find(|s| { + s.origin != Locality::SessionLocal && s.key_expr == key_expr + }) { + sub_state.remote_id = twin_sub.remote_id; + None + } else { + Some(key_expr.clone()) + } + } } - } - }) - .flatten(); + }) + .flatten(); + + let sub_state = Arc::new(sub_state); state.subscribers.insert(sub_state.id, sub_state.clone()); for res in state @@ -1064,7 +1079,7 @@ impl Session { ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: id as u32, + id, wire_expr: key_expr.to_wire(self).to_owned(), ext_info: *info, }), @@ -1080,7 +1095,7 @@ impl Session { Ok(sub_state) } - pub(crate) fn unsubscribe(&self, sid: usize) -> ZResult<()> { + pub(crate) fn unsubscribe(&self, sid: Id) -> ZResult<()> { let mut state = zwrite!(self.state); if let Some(sub_state) = state.subscribers.remove(&sid) { trace!("unsubscribe({:?})", sub_state); @@ -1110,65 +1125,28 
@@ impl Session { if send_forget { // Note: there might be several Subscribers on the same KeyExpr. // Before calling forget_subscriber(key_expr), check if this was the last one. - let key_expr = &sub_state.key_expr; - match state - .aggregated_subscribers - .iter() - .find(|s| s.includes(key_expr)) - { - Some(join_sub) => { - let joined_sub = state.subscribers.values().any(|s| { - s.origin != Locality::SessionLocal && join_sub.includes(&s.key_expr) - }); - if !joined_sub { - let primitives = state.primitives.as_ref().unwrap().clone(); - let wire_expr = WireExpr::from(join_sub).to_owned(); - drop(state); - primitives.send_declare(Declare { - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }); - - #[cfg(feature = "unstable")] - { - let state = zread!(self.state); - self.update_status_down(&state, &sub_state.key_expr) - } - } - } - None => { - let twin_sub = state - .subscribers - .values() - .any(|s| s.origin != Locality::SessionLocal && s.key_expr == *key_expr); - if !twin_sub { - let primitives = state.primitives.as_ref().unwrap().clone(); - drop(state); - primitives.send_declare(Declare { - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { - wire_expr: key_expr.to_wire(self).to_owned(), - }, - }), - }); - - #[cfg(feature = "unstable")] - { - let state = zread!(self.state); - self.update_status_down(&state, &sub_state.key_expr) - } - } + if !state.subscribers.values().any(|s| { + s.origin != Locality::SessionLocal && s.remote_id == sub_state.remote_id + }) { + let primitives = state.primitives.as_ref().unwrap().clone(); + drop(state); + primitives.send_declare(Declare { + ext_qos: declare::ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: declare::ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: sub_state.remote_id, + ext_wire_expr: WireExprType { + wire_expr: WireExpr::empty(), + }, + }), + }); + #[cfg(feature = "unstable")] + { + let state = zread!(self.state); + self.update_status_down(&state, &sub_state.key_expr) } - }; + } } Ok(()) } else { @@ -1185,7 +1163,7 @@ impl Session { ) -> ZResult> { let mut state = zwrite!(self.state); log::trace!("queryable({:?})", key_expr); - let id = state.decl_id_counter.fetch_add(1, Ordering::SeqCst); + let id = self.runtime.next_id(); let qable_state = Arc::new(QueryableState { id, key_expr: key_expr.to_owned(), @@ -1193,158 +1171,48 @@ impl Session { origin, callback, }); - #[cfg(feature = "complete_n")] - { - state.queryables.insert(id, qable_state.clone()); - if origin != Locality::SessionLocal && complete { - let primitives = state.primitives.as_ref().unwrap().clone(); - let complete = Session::complete_twin_qabls(&state, key_expr); - drop(state); - let qabl_info = QueryableInfo { - complete, - distance: 0, - }; - primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: id as u32, - wire_expr: key_expr.to_owned(), - ext_info: qabl_info, - }), - }); - } - } - #[cfg(not(feature = "complete_n"))] - { - let twin_qabl = Session::twin_qabl(&state, key_expr); - let 
complete_twin_qabl = twin_qabl && Session::complete_twin_qabl(&state, key_expr); - - state.queryables.insert(id, qable_state.clone()); + state.queryables.insert(id, qable_state.clone()); - if origin != Locality::SessionLocal && (!twin_qabl || (!complete_twin_qabl && complete)) - { - let primitives = state.primitives.as_ref().unwrap().clone(); - let complete = u8::from(!complete_twin_qabl && complete); - drop(state); - let qabl_info = QueryableInfo { - complete, - distance: 0, - }; - primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: id as u32, - wire_expr: key_expr.to_owned(), - ext_info: qabl_info, - }), - }); - } + if origin != Locality::SessionLocal { + let primitives = state.primitives.as_ref().unwrap().clone(); + drop(state); + let qabl_info = QueryableInfo { + complete: if complete { 1 } else { 0 }, + distance: 0, + }; + primitives.send_declare(Declare { + ext_qos: declare::ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: declare::ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr.to_owned(), + ext_info: qabl_info, + }), + }); } Ok(qable_state) } - pub(crate) fn twin_qabl(state: &SessionState, key: &WireExpr) -> bool { - state.queryables.values().any(|q| { - q.origin != Locality::SessionLocal - && state.local_wireexpr_to_expr(&q.key_expr).unwrap() - == state.local_wireexpr_to_expr(key).unwrap() - }) - } - - #[cfg(not(feature = "complete_n"))] - pub(crate) fn complete_twin_qabl(state: &SessionState, key: &WireExpr) -> bool { - state.queryables.values().any(|q| { - q.origin != Locality::SessionLocal - && q.complete - && state.local_wireexpr_to_expr(&q.key_expr).unwrap() - == state.local_wireexpr_to_expr(key).unwrap() - }) - } - - #[cfg(feature = "complete_n")] - pub(crate) fn complete_twin_qabls(state: &SessionState, key: &WireExpr) -> u8 { - state - .queryables - .values() - .filter(|q| { - q.origin != Locality::SessionLocal - && q.complete - && state.local_wireexpr_to_expr(&q.key_expr).unwrap() - == state.local_wireexpr_to_expr(key).unwrap() - }) - .count() as u8 - } - - pub(crate) fn close_queryable(&self, qid: usize) -> ZResult<()> { + pub(crate) fn close_queryable(&self, qid: Id) -> ZResult<()> { let mut state = zwrite!(self.state); if let Some(qable_state) = state.queryables.remove(&qid) { trace!("close_queryable({:?})", qable_state); if qable_state.origin != Locality::SessionLocal { let primitives = state.primitives.as_ref().unwrap().clone(); - if Session::twin_qabl(&state, &qable_state.key_expr) { - // There still exist Queryables on the same KeyExpr. 
- if qable_state.complete { - #[cfg(feature = "complete_n")] - { - let complete = - Session::complete_twin_qabls(&state, &qable_state.key_expr); - drop(state); - let qabl_info = QueryableInfo { - complete, - distance: 0, - }; - primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - wire_expr: qable_state.key_expr.clone(), - ext_info: qabl_info, - }), - }); - } - #[cfg(not(feature = "complete_n"))] - { - if !Session::complete_twin_qabl(&state, &qable_state.key_expr) { - drop(state); - let qabl_info = QueryableInfo { - complete: 0, - distance: 0, - }; - primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - wire_expr: qable_state.key_expr.clone(), - ext_info: qabl_info, - }), - }); - } - } - } - } else { - // There are no more Queryables on the same KeyExpr. - drop(state); - primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { - wire_expr: qable_state.key_expr.clone(), - }, - }), - }); - } + drop(state); + primitives.send_declare(Declare { + ext_qos: declare::ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: declare::ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: qable_state.id, + ext_wire_expr: WireExprType { + wire_expr: qable_state.key_expr.clone(), + }, + }), + }); } Ok(()) } else { @@ -1359,7 +1227,7 @@ impl Session { ) -> ZResult> { let mut state = zwrite!(self.state); log::trace!("declare_liveliness({:?})", key_expr); - let id = state.decl_id_counter.fetch_add(1, Ordering::SeqCst); + let id = self.runtime.next_id(); let key_expr = KeyExpr::from(*crate::liveliness::KE_PREFIX_LIVELINESS / key_expr); let tok_state = Arc::new(LivelinessTokenState { id, @@ -1374,7 +1242,7 @@ impl Session { ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: id as u32, + id, wire_expr: key_expr.to_wire(self).to_owned(), ext_info: SubscriberInfo::DEFAULT, }), @@ -1383,7 +1251,7 @@ impl Session { } #[zenoh_macros::unstable] - pub(crate) fn undeclare_liveliness(&self, tid: usize) -> ZResult<()> { + pub(crate) fn undeclare_liveliness(&self, tid: Id) -> ZResult<()> { let mut state = zwrite!(self.state); if let Some(tok_state) = state.tokens.remove(&tid) { trace!("undeclare_liveliness({:?})", tok_state); @@ -1398,10 +1266,8 @@ impl Session { ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { - wire_expr: key_expr.to_wire(self).to_owned(), - }, + id: tok_state.id, + ext_wire_expr: WireExprType::null(), }), }); } @@ -1418,8 +1284,7 @@ impl Session { callback: Callback<'static, MatchingStatus>, ) -> ZResult> { let mut state = zwrite!(self.state); - - let id = state.decl_id_counter.fetch_add(1, Ordering::SeqCst); + let id = self.runtime.next_id(); log::trace!("matches_listener({:?}) => {id}", publisher.key_expr); let 
listener_state = Arc::new(MatchingListenerState { id, @@ -1554,7 +1419,7 @@ impl Session { } #[zenoh_macros::unstable] - pub(crate) fn undeclare_matches_listener_inner(&self, sid: usize) -> ZResult<()> { + pub(crate) fn undeclare_matches_listener_inner(&self, sid: Id) -> ZResult<()> { let mut state = zwrite!(self.state); if let Some(state) = state.matching_listeners.remove(&sid) { trace!("undeclare_matches_listener_inner({:?})", state); @@ -1856,15 +1721,15 @@ impl Session { body: Option, #[cfg(feature = "unstable")] attachment: Option, ) { - let (primitives, key_expr, callbacks) = { + let (primitives, key_expr, queryables) = { let state = zread!(self.state); match state.wireexpr_to_keyexpr(key_expr, local) { Ok(key_expr) => { - let callbacks = state + let queryables = state .queryables - .values() + .iter() .filter( - |queryable| + |(_, queryable)| (queryable.origin == Locality::Any || (local == (queryable.origin == Locality::SessionLocal))) && @@ -1881,12 +1746,12 @@ impl Session { } } ) - .map(|qable| qable.callback.clone()) - .collect::>>(); + .map(|(id, qable)| (*id, qable.callback.clone())) + .collect::)>>(); ( state.primitives.as_ref().unwrap().clone(), key_expr.into_owned(), - callbacks, + queryables, ) } Err(err) => { @@ -1898,29 +1763,30 @@ impl Session { let parameters = parameters.to_owned(); - let zid = self.runtime.zid(); // @TODO build/use prebuilt specific zid + let zid = self.runtime.zid(); - let query = Query { - inner: Arc::new(QueryInner { - key_expr, - parameters, - value: body.map(|b| Value { - payload: b.payload.into(), - encoding: b.encoding.into(), - }), - qid, - zid, - primitives: if local { - Arc::new(self.clone()) - } else { - primitives - }, - #[cfg(feature = "unstable")] - attachment, + let query_inner = Arc::new(QueryInner { + key_expr, + parameters, + value: body.map(|b| Value { + payload: b.payload.into(), + encoding: b.encoding.into(), }), - }; - for callback in callbacks.iter() { - callback(query.clone()); + qid, + zid, + primitives: if local { + Arc::new(self.clone()) + } else { + primitives + }, + #[cfg(feature = "unstable")] + attachment, + }); + for (eid, callback) in queryables { + callback(Query { + inner: query_inner.clone(), + eid, + }); } } } @@ -2111,9 +1977,13 @@ impl Primitives for Session { trace!("recv DeclareSubscriber {} {:?}", m.id, m.wire_expr); #[cfg(feature = "unstable")] { - let state = zread!(self.state); - match state.wireexpr_to_keyexpr(&m.wire_expr, false) { + let mut state = zwrite!(self.state); + match state + .wireexpr_to_keyexpr(&m.wire_expr, false) + .map(|e| e.into_owned()) + { Ok(expr) => { + state.remote_subscribers.insert(m.id, expr.clone()); self.update_status_up(&state, &expr); if expr @@ -2141,33 +2011,30 @@ impl Primitives for Session { trace!("recv UndeclareSubscriber {:?}", m.id); #[cfg(feature = "unstable")] { - let state = zread!(self.state); - match state.wireexpr_to_keyexpr(&m.ext_wire_expr.wire_expr, false) { - Ok(expr) => { - self.update_status_down(&state, &expr); + let mut state = zwrite!(self.state); + if let Some(expr) = state.remote_subscribers.remove(&m.id) { + self.update_status_down(&state, &expr); - if expr - .as_str() - .starts_with(crate::liveliness::PREFIX_LIVELINESS) - { - drop(state); - let data_info = DataInfo { - kind: SampleKind::Delete, - ..Default::default() - }; - self.handle_data( - false, - &m.ext_wire_expr.wire_expr, - Some(data_info), - ZBuf::default(), - #[cfg(feature = "unstable")] - None, - ); - } - } - Err(err) => { - log::error!("Received Forget Subscriber for unkown key_expr: {}", 
err) + if expr + .as_str() + .starts_with(crate::liveliness::PREFIX_LIVELINESS) + { + drop(state); + let data_info = DataInfo { + kind: SampleKind::Delete, + ..Default::default() + }; + self.handle_data( + false, + &m.ext_wire_expr.wire_expr, + Some(data_info), + ZBuf::default(), + #[cfg(feature = "unstable")] + None, + ); } + } else { + log::error!("Received Undeclare Subscriber for unkown id: {}", m.id); } } } @@ -2194,7 +2061,7 @@ impl Primitives for Session { encoding: Some(m.encoding.into()), timestamp: m.timestamp, qos: QoS::from(msg.ext_qos), - source_id: m.ext_sinfo.as_ref().map(|i| i.zid), + source_id: m.ext_sinfo.as_ref().map(|i| i.id.clone()), source_sn: m.ext_sinfo.as_ref().map(|i| i.sn as u64), }; self.handle_data( @@ -2212,7 +2079,7 @@ impl Primitives for Session { encoding: None, timestamp: m.timestamp, qos: QoS::from(msg.ext_qos), - source_id: m.ext_sinfo.as_ref().map(|i| i.zid), + source_id: m.ext_sinfo.as_ref().map(|i| i.id.clone()), source_sn: m.ext_sinfo.as_ref().map(|i| i.sn as u64), }; self.handle_data( @@ -2272,7 +2139,7 @@ impl Primitives for Session { }, }; let replier_id = match e.ext_sinfo { - Some(info) => info.zid, + Some(info) => info.id.zid, None => ZenohId::rand(), }; let new_reply = Reply { @@ -2366,7 +2233,7 @@ impl Primitives for Session { encoding: Some(encoding.into()), timestamp, qos: QoS::from(msg.ext_qos), - source_id: ext_sinfo.as_ref().map(|i| i.zid), + source_id: ext_sinfo.as_ref().map(|i| i.id.clone()), source_sn: ext_sinfo.as_ref().map(|i| i.sn as u64), }, #[cfg(feature = "unstable")] @@ -2384,7 +2251,7 @@ impl Primitives for Session { encoding: None, timestamp, qos: QoS::from(msg.ext_qos), - source_id: ext_sinfo.as_ref().map(|i| i.zid), + source_id: ext_sinfo.as_ref().map(|i| i.id.clone()), source_sn: ext_sinfo.as_ref().map(|i| i.sn as u64), }, #[cfg(feature = "unstable")] diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index c707218017..e276d0c6d0 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -25,6 +25,8 @@ use std::future::Ready; use std::ops::{Deref, DerefMut}; use std::sync::Arc; use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; +#[cfg(feature = "unstable")] +use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::network::declare::{subscriber::ext::SubscriberInfo, Mode}; /// The kind of reliability. @@ -32,6 +34,7 @@ pub use zenoh_protocol::core::Reliability; pub(crate) struct SubscriberState { pub(crate) id: Id, + pub(crate) remote_id: Id, pub(crate) key_expr: KeyExpr<'static>, pub(crate) scope: Option>, pub(crate) origin: Locality, @@ -741,6 +744,29 @@ impl<'a, Receiver> PullSubscriber<'a, Receiver> { } impl<'a, Receiver> Subscriber<'a, Receiver> { + /// Returns the [`EntityGlobalId`] of this Subscriber. + /// + /// # Examples + /// ``` + /// # async_std::task::block_on(async { + /// use zenoh::prelude::r#async::*; + /// + /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let subscriber = session.declare_subscriber("key/expression") + /// .res() + /// .await + /// .unwrap(); + /// let subscriber_id = subscriber.id(); + /// # }) + /// ``` + #[zenoh_macros::unstable] + pub fn id(&self) -> EntityGlobalId { + EntityGlobalId { + zid: self.subscriber.session.zid(), + eid: self.subscriber.state.id, + } + } + /// Returns the [`KeyExpr`] this Subscriber subscribes to. 
pub fn key_expr(&self) -> &KeyExpr<'static> { &self.subscriber.state.key_expr From f12f3382fa38af82cdd8bf75bcbb2bad4eec7f68 Mon Sep 17 00:00:00 2001 From: DenisBiryukov91 <155981813+DenisBiryukov91@users.noreply.github.com> Date: Tue, 12 Mar 2024 19:00:48 +0100 Subject: [PATCH 010/357] refactor Query.reply() (#796) * refactor Query.reply() into seprate methods:reply, reply_del and reply_err * explain #[allow(unused_mut)]; replace unwrap on KeyxExpr.try_from with ? as it was originally for Sample in zenoh/tests/routing.rs * mark Query.reply_sample as unstable * format fix --- examples/examples/z_queryable.rs | 19 +- examples/examples/z_storage.rs | 2 +- plugins/zenoh-backend-traits/Cargo.toml | 2 +- plugins/zenoh-backend-traits/src/lib.rs | 2 +- plugins/zenoh-plugin-example/src/lib.rs | 2 +- .../zenoh-plugin-rest/examples/z_serve_sse.rs | 6 +- .../src/replica/align_queryable.rs | 53 +-- .../src/replica/storage.rs | 6 +- zenoh-ext/src/group.rs | 6 +- zenoh-ext/src/publication_cache.rs | 6 +- zenoh/src/admin.rs | 6 +- zenoh/src/net/runtime/adminspace.rs | 30 +- zenoh/src/queryable.rs | 355 ++++++++++++------ zenoh/src/sample.rs | 6 + zenoh/src/session.rs | 12 +- zenoh/tests/attachments.rs | 4 +- zenoh/tests/routing.rs | 4 +- zenoh/tests/session.rs | 19 +- zenoh/tests/unicity.rs | 16 +- 19 files changed, 340 insertions(+), 216 deletions(-) diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index d7376835b7..12c1fc3f20 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -54,25 +54,28 @@ async fn main() { println!(">> [Queryable ] Received Query '{}' with value '{}'", query.selector(), payload); }, } - let reply = if send_errors.swap(false, Relaxed) { + if send_errors.swap(false, Relaxed) { println!( ">> [Queryable ] Replying (ERROR: '{}')", value, ); - Err(value.clone().into()) + query + .reply_err(value.clone()) + .res() + .await + .unwrap_or_else(|e| println!(">> [Queryable ] Error sending reply: {e}")); } else { println!( ">> [Queryable ] Responding ('{}': '{}')", key_expr.as_str(), value, ); - Ok(Sample::new(key_expr.clone(), value.clone())) + query + .reply(key_expr.clone(), value.clone()) + .res() + .await + .unwrap_or_else(|e| println!(">> [Queryable ] Error sending reply: {e}")); }; - query - .reply(reply) - .res() - .await - .unwrap_or_else(|e| println!(">> [Queryable ] Error sending reply: {e}")); }, _ = stdin.read_exact(&mut input).fuse() => { diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index 5e0eaabd44..857181751b 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -67,7 +67,7 @@ async fn main() { println!(">> [Queryable ] Received Query '{}'", query.selector()); for (stored_name, sample) in stored.iter() { if query.selector().key_expr.intersects(unsafe {keyexpr::from_str_unchecked(stored_name)}) { - query.reply(Ok(sample.clone())).res().await.unwrap(); + query.reply(sample.key_expr.clone(), sample.payload.clone()).res().await.unwrap(); } } }, diff --git a/plugins/zenoh-backend-traits/Cargo.toml b/plugins/zenoh-backend-traits/Cargo.toml index f2b8a4a1eb..b3926ab955 100644 --- a/plugins/zenoh-backend-traits/Cargo.toml +++ b/plugins/zenoh-backend-traits/Cargo.toml @@ -31,7 +31,7 @@ async-std = { workspace = true, features = ["default"] } async-trait = { workspace = true } derive_more = { workspace = true } serde_json = { workspace = true } -zenoh = { workspace = true } +zenoh = { workspace = true, features = ["unstable"] } zenoh-result = { workspace = true 
} zenoh-util = { workspace = true } schemars = { workspace = true } diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index 8b9fa359e0..d17e6dfd77 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -325,6 +325,6 @@ impl Query { sample }; // Send reply - self.q.reply(Ok(sample)) + self.q.reply_sample(sample) } } diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 592a08ca9b..12cc6ffa84 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -174,7 +174,7 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { info!("Handling query '{}'", query.selector()); for (key_expr, sample) in stored.iter() { if query.selector().key_expr.intersects(unsafe{keyexpr::from_str_unchecked(key_expr)}) { - query.reply(Ok(sample.clone())).res().await.unwrap(); + query.reply_sample(sample.clone()).res().await.unwrap(); } } } diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index c5bdcc4c73..bb76005d6e 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -49,11 +49,7 @@ async fn main() { let receiver = queryable.receiver.clone(); async move { while let Ok(request) = receiver.recv_async().await { - request - .reply(Ok(Sample::new(key, HTML))) - .res() - .await - .unwrap(); + request.reply(key, HTML).res().await.unwrap(); } } }); diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 359b8dd7e8..5fda8b576d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -95,34 +95,43 @@ impl AlignQueryable { for value in values { match value { AlignData::Interval(i, c) => { - let sample = Sample::new( - query.key_expr().clone(), - serde_json::to_string(&(i, c)).unwrap(), - ); - query.reply(Ok(sample)).res().await.unwrap(); + query + .reply( + query.key_expr().clone(), + serde_json::to_string(&(i, c)).unwrap(), + ) + .res() + .await + .unwrap(); } AlignData::Subinterval(i, c) => { - let sample = Sample::new( - query.key_expr().clone(), - serde_json::to_string(&(i, c)).unwrap(), - ); - query.reply(Ok(sample)).res().await.unwrap(); + query + .reply( + query.key_expr().clone(), + serde_json::to_string(&(i, c)).unwrap(), + ) + .res() + .await + .unwrap(); } AlignData::Content(i, c) => { - let sample = Sample::new( - query.key_expr().clone(), - serde_json::to_string(&(i, c)).unwrap(), - ); - query.reply(Ok(sample)).res().await.unwrap(); + query + .reply( + query.key_expr().clone(), + serde_json::to_string(&(i, c)).unwrap(), + ) + .res() + .await + .unwrap(); } AlignData::Data(k, (v, ts)) => { - let Value { - payload, encoding, .. 
- } = v; - let sample = Sample::new(k, payload) - .with_encoding(encoding) - .with_timestamp(ts); - query.reply(Ok(sample)).res().await.unwrap(); + query + .reply(k, v.payload) + .with_encoding(v.encoding) + .with_timestamp(ts) + .res() + .await + .unwrap(); } } } diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 1ef7e65390..6b48895612 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -532,7 +532,7 @@ impl StorageService { } else { sample }; - if let Err(e) = q.reply(Ok(sample)).res().await { + if let Err(e) = q.reply_sample(sample).res().await { log::warn!( "Storage '{}' raised an error replying a query: {}", self.name, @@ -570,7 +570,7 @@ impl StorageService { } else { sample }; - if let Err(e) = q.reply(Ok(sample)).res().await { + if let Err(e) = q.reply_sample(sample).res().await { log::warn!( "Storage '{}' raised an error replying a query: {}", self.name, @@ -583,7 +583,7 @@ impl StorageService { let err_message = format!("Storage '{}' raised an error on query: {}", self.name, e); log::warn!("{}", err_message); - if let Err(e) = q.reply(Err(err_message.into())).res().await { + if let Err(e) = q.reply_err(err_message).res().await { log::warn!( "Storage '{}' raised an error replying a query: {}", self.name, diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 9078e61741..75a435e8f4 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -237,11 +237,7 @@ async fn query_handler(z: Arc, state: Arc) { while let Ok(query) = queryable.recv_async().await { log::trace!("Serving query for: {}", &qres); - query - .reply(Ok(Sample::new(qres.clone(), buf.clone()))) - .res() - .await - .unwrap(); + query.reply(qres.clone(), buf.clone()).res().await.unwrap(); } } diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index cd5ed964ad..1c9a286800 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -201,7 +201,7 @@ impl<'a> PublicationCache<'a> { } }, - // on query, reply with cach content + // on query, reply with cache content query = quer_recv.recv_async() => { if let Ok(query) = query { if !query.selector().key_expr.as_str().contains('*') { @@ -212,7 +212,7 @@ impl<'a> PublicationCache<'a> { continue; } } - if let Err(e) = query.reply(Ok(sample.clone())).res_async().await { + if let Err(e) = query.reply_sample(sample.clone()).res_async().await { log::warn!("Error replying to query: {}", e); } } @@ -226,7 +226,7 @@ impl<'a> PublicationCache<'a> { continue; } } - if let Err(e) = query.reply(Ok(sample.clone())).res_async().await { + if let Err(e) = query.reply_sample(sample.clone()).res_async().await { log::warn!("Error replying to query: {}", e); } } diff --git a/zenoh/src/admin.rs b/zenoh/src/admin.rs index 5a242d51b7..268997d687 100644 --- a/zenoh/src/admin.rs +++ b/zenoh/src/admin.rs @@ -17,7 +17,7 @@ use crate::{ prelude::sync::{KeyExpr, Locality, SampleKind}, queryable::Query, sample::DataInfo, - Payload, Sample, Session, ZResult, + Payload, Session, ZResult, }; use async_std::task; use std::{ @@ -71,7 +71,7 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { if let Ok(value) = serde_json::value::to_value(peer.clone()) { match Payload::try_from(value) { Ok(zbuf) => { - let _ = query.reply(Ok(Sample::new(key_expr, zbuf))).res_sync(); + let _ = query.reply(key_expr, zbuf).res_sync(); } Err(e) => 
log::debug!("Admin query error: {}", e), } @@ -88,7 +88,7 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { if let Ok(value) = serde_json::value::to_value(link) { match Payload::try_from(value) { Ok(zbuf) => { - let _ = query.reply(Ok(Sample::new(key_expr, zbuf))).res_sync(); + let _ = query.reply(key_expr, zbuf).res_sync(); } Err(e) => log::debug!("Admin query error: {}", e), } diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index e76475f447..b67692e704 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -17,7 +17,7 @@ use crate::key_expr::KeyExpr; use crate::net::primitives::Primitives; use crate::payload::Payload; use crate::plugins::sealed::{self as plugins}; -use crate::prelude::sync::{Sample, SyncResolve}; +use crate::prelude::sync::SyncResolve; use crate::queryable::Query; use crate::queryable::QueryInner; use crate::value::Value; @@ -577,9 +577,8 @@ fn router_data(context: &AdminContext, query: Query) { } }; if let Err(e) = query - .reply(Ok( - Sample::new(reply_key, payload).with_encoding(Encoding::APPLICATION_JSON) - )) + .reply(reply_key, payload) + .with_encoding(Encoding::APPLICATION_JSON) .res_sync() { log::error!("Error sending AdminSpace reply: {:?}", e); @@ -609,7 +608,7 @@ zenoh_build{{version="{}"}} 1 .openmetrics_text(), ); - if let Err(e) = query.reply(Ok(Sample::new(reply_key, metrics))).res() { + if let Err(e) = query.reply(reply_key, metrics).res() { log::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -622,10 +621,7 @@ fn routers_linkstate_data(context: &AdminContext, query: Query) { let tables = zread!(context.runtime.state.router.tables.tables); if let Err(e) = query - .reply(Ok(Sample::new( - reply_key, - tables.hat_code.info(&tables, WhatAmI::Router), - ))) + .reply(reply_key, tables.hat_code.info(&tables, WhatAmI::Router)) .res() { log::error!("Error sending AdminSpace reply: {:?}", e); @@ -640,10 +636,7 @@ fn peers_linkstate_data(context: &AdminContext, query: Query) { let tables = zread!(context.runtime.state.router.tables.tables); if let Err(e) = query - .reply(Ok(Sample::new( - reply_key, - tables.hat_code.info(&tables, WhatAmI::Peer), - ))) + .reply(reply_key, tables.hat_code.info(&tables, WhatAmI::Peer)) .res() { log::error!("Error sending AdminSpace reply: {:?}", e); @@ -660,7 +653,7 @@ fn subscribers_data(context: &AdminContext, query: Query) { )) .unwrap(); if query.key_expr().intersects(&key) { - if let Err(e) = query.reply(Ok(Sample::new(key, Payload::empty()))).res() { + if let Err(e) = query.reply(key, Payload::empty()).res() { log::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -677,7 +670,7 @@ fn queryables_data(context: &AdminContext, query: Query) { )) .unwrap(); if query.key_expr().intersects(&key) { - if let Err(e) = query.reply(Ok(Sample::new(key, Payload::empty()))).res() { + if let Err(e) = query.reply(key, Payload::empty()).res() { log::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -697,7 +690,7 @@ fn plugins_data(context: &AdminContext, query: Query) { let status = serde_json::to_value(status).unwrap(); match Payload::try_from(status) { Ok(zbuf) => { - if let Err(e) = query.reply(Ok(Sample::new(key, zbuf))).res_sync() { + if let Err(e) = query.reply(key, zbuf).res_sync() { log::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -718,8 +711,7 @@ fn plugins_status(context: &AdminContext, query: Query) { with_extended_string(plugin_key, &["/__path__"], |plugin_path_key| { if let Ok(key_expr) = 
KeyExpr::try_from(plugin_path_key.clone()) { if query.key_expr().intersects(&key_expr) { - if let Err(e) = query.reply(Ok(Sample::new(key_expr, plugin.path()))).res() - { + if let Err(e) = query.reply(key_expr, plugin.path()).res() { log::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -743,7 +735,7 @@ fn plugins_status(context: &AdminContext, query: Query) { if let Ok(key_expr) = KeyExpr::try_from(response.key) { match Payload::try_from(response.value) { Ok(zbuf) => { - if let Err(e) = query.reply(Ok(Sample::new(key_expr, zbuf))).res_sync() { + if let Err(e) = query.reply(key_expr, zbuf).res_sync() { log::error!("Error sending AdminSpace reply: {:?}", e); } }, diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index bd5ec81101..ed3bd63b6a 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -14,9 +14,12 @@ //! Queryable primitives. +use crate::encoding::Encoding; use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; +use crate::sample::QoS; +use crate::sample::SourceInfo; use crate::Id; use crate::SessionRef; use crate::Undeclarable; @@ -26,6 +29,7 @@ use std::fmt; use std::future::Ready; use std::ops::Deref; use std::sync::Arc; +use uhlc::Timestamp; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_protocol::core::{EntityId, WireExpr}; use zenoh_protocol::network::{response, Mapping, RequestId, Response, ResponseFinal}; @@ -96,6 +100,42 @@ impl Query { pub fn attachment(&self) -> Option<&Attachment> { self.inner.attachment.as_ref() } + /// Sends a reply in the form of [`Sample`] to this Query. + /// + /// By default, queries only accept replies whose key expression intersects with the query's. + /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]), + /// replying on a disjoint key expression will result in an error when resolving the reply. + /// This api is for internal use only. + #[inline(always)] + #[cfg(feature = "unstable")] + #[doc(hidden)] + pub fn reply_sample(&self, sample: Sample) -> ReplyBuilder<'_> { + let Sample { + key_expr, + payload, + kind, + encoding, + timestamp, + qos, + #[cfg(feature = "unstable")] + source_info, + #[cfg(feature = "unstable")] + attachment, + } = sample; + ReplyBuilder { + query: self, + key_expr, + payload, + kind, + encoding, + timestamp, + qos, + #[cfg(feature = "unstable")] + source_info, + #[cfg(feature = "unstable")] + attachment, + } + } /// Sends a reply to this Query. /// @@ -103,10 +143,64 @@ impl Query { /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]), /// replying on a disjoint key expression will result in an error when resolving the reply. #[inline(always)] - pub fn reply(&self, result: Result) -> ReplyBuilder<'_> { + pub fn reply( + &self, + key_expr: IntoKeyExpr, + payload: IntoPayload, + ) -> ReplyBuilder<'_> + where + IntoKeyExpr: Into>, + IntoPayload: Into, + { ReplyBuilder { query: self, - result, + key_expr: key_expr.into(), + payload: payload.into(), + kind: SampleKind::Put, + timestamp: None, + encoding: Encoding::default(), + qos: response::ext::QoSType::RESPONSE.into(), + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment: None, + } + } + /// Sends a error reply to this Query. 
+ /// + #[inline(always)] + pub fn reply_err(&self, value: IntoValue) -> ReplyErrBuilder<'_> + where + IntoValue: Into, + { + ReplyErrBuilder { + query: self, + value: value.into(), + } + } + + /// Sends a delete reply to this Query. + /// + /// By default, queries only accept replies whose key expression intersects with the query's. + /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]), + /// replying on a disjoint key expression will result in an error when resolving the reply. + #[inline(always)] + pub fn reply_del(&self, key_expr: IntoKeyExpr) -> ReplyBuilder<'_> + where + IntoKeyExpr: Into>, + { + ReplyBuilder { + query: self, + key_expr: key_expr.into(), + payload: Payload::empty(), + kind: SampleKind::Delete, + timestamp: None, + encoding: Encoding::default(), + qos: response::ext::QoSType::RESPONSE.into(), + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment: None, } } @@ -149,25 +243,50 @@ impl fmt::Display for Query { } } -/// A builder returned by [`Query::reply()`](Query::reply). +/// A builder returned by [`Query::reply()`](Query::reply) or [`Query::reply()`](Query::reply). #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] pub struct ReplyBuilder<'a> { query: &'a Query, - result: Result, + key_expr: KeyExpr<'static>, + payload: Payload, + kind: SampleKind, + encoding: Encoding, + timestamp: Option, + qos: QoS, + #[cfg(feature = "unstable")] + source_info: SourceInfo, + #[cfg(feature = "unstable")] + attachment: Option, +} + +/// A builder returned by [`Query::reply_err()`](Query::reply_err). +#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +#[derive(Debug)] +pub struct ReplyErrBuilder<'a> { + query: &'a Query, + value: Value, } impl<'a> ReplyBuilder<'a> { - #[allow(clippy::result_large_err)] #[zenoh_macros::unstable] - pub fn with_attachment(mut self, attachment: Attachment) -> Result { - match &mut self.result { - Ok(sample) => { - sample.attachment = Some(attachment); - Ok(self) - } - Err(_) => Err((self, attachment)), - } + pub fn with_attachment(mut self, attachment: Attachment) -> Self { + self.attachment = Some(attachment); + self + } + #[zenoh_macros::unstable] + pub fn with_source_info(mut self, source_info: SourceInfo) -> Self { + self.source_info = source_info; + self + } + pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self { + self.timestamp = Some(timestamp); + self + } + + pub fn with_encoding(mut self, encoding: Encoding) -> Self { + self.encoding = encoding; + self } } @@ -177,119 +296,65 @@ impl<'a> Resolvable for ReplyBuilder<'a> { impl SyncResolve for ReplyBuilder<'_> { fn res_sync(self) -> ::To { - match self.result { - Ok(sample) => { - if !self.query._accepts_any_replies().unwrap_or(false) - && !self.query.key_expr().intersects(&sample.key_expr) - { - bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", sample.key_expr, self.query.key_expr()) - } - let Sample { - key_expr, - payload, - kind, - encoding, - timestamp, - #[cfg(feature = "unstable")] - source_info, - #[cfg(feature = "unstable")] - attachment, - .. - } = sample; - // Use a macro for inferring the proper const extension ID between Put and Del cases - macro_rules! 
ext_attachment { - () => {{ - #[allow(unused_mut)] - let mut ext_attachment = None; + if !self.query._accepts_any_replies().unwrap_or(false) + && !self.query.key_expr().intersects(&self.key_expr) + { + bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", self.key_expr, self.query.key_expr()) + } + #[allow(unused_mut)] // will be unused if feature = "unstable" is not enabled + let mut ext_sinfo = None; + #[cfg(feature = "unstable")] + { + if self.source_info.source_id.is_some() || self.source_info.source_sn.is_some() { + ext_sinfo = Some(zenoh::put::ext::SourceInfoType { + id: self.source_info.source_id.unwrap_or_default(), + sn: self.source_info.source_sn.unwrap_or_default() as u32, + }) + } + } + self.query.inner.primitives.send_response(Response { + rid: self.query.inner.qid, + wire_expr: WireExpr { + scope: 0, + suffix: std::borrow::Cow::Owned(self.key_expr.into()), + mapping: Mapping::Sender, + }, + payload: ResponseBody::Reply(zenoh::Reply { + consolidation: zenoh::Consolidation::DEFAULT, + ext_unknown: vec![], + payload: match self.kind { + SampleKind::Put => ReplyBody::Put(Put { + timestamp: self.timestamp, + encoding: self.encoding.into(), + ext_sinfo, + #[cfg(feature = "shared-memory")] + ext_shm: None, #[cfg(feature = "unstable")] - { - if let Some(attachment) = attachment { - ext_attachment = Some(attachment.into()); - } - } - ext_attachment - }}; - } - #[allow(unused_mut)] - let mut ext_sinfo = None; - #[cfg(feature = "unstable")] - { - if source_info.source_id.is_some() || source_info.source_sn.is_some() { - ext_sinfo = Some(zenoh::put::ext::SourceInfoType { - id: source_info.source_id.unwrap_or_default(), - sn: source_info.source_sn.unwrap_or_default() as u32, - }) - } - } - self.query.inner.primitives.send_response(Response { - rid: self.query.inner.qid, - wire_expr: WireExpr { - scope: 0, - suffix: std::borrow::Cow::Owned(key_expr.into()), - mapping: Mapping::Sender, - }, - payload: ResponseBody::Reply(zenoh::Reply { - consolidation: zenoh::Consolidation::DEFAULT, + ext_attachment: self.attachment.map(|a| a.into()), + #[cfg(not(feature = "unstable"))] + ext_attachment: None, ext_unknown: vec![], - payload: match kind { - SampleKind::Put => ReplyBody::Put(Put { - timestamp, - encoding: encoding.into(), - ext_sinfo, - #[cfg(feature = "shared-memory")] - ext_shm: None, - ext_attachment: ext_attachment!(), - ext_unknown: vec![], - payload: payload.into(), - }), - SampleKind::Delete => ReplyBody::Del(Del { - timestamp, - ext_sinfo, - ext_attachment: ext_attachment!(), - ext_unknown: vec![], - }), - }, + payload: self.payload.into(), }), - ext_qos: response::ext::QoSType::RESPONSE, - ext_tstamp: None, - ext_respid: Some(response::ext::ResponderIdType { - zid: self.query.inner.zid, - eid: self.query.eid, - }), - }); - Ok(()) - } - Err(payload) => { - self.query.inner.primitives.send_response(Response { - rid: self.query.inner.qid, - wire_expr: WireExpr { - scope: 0, - suffix: std::borrow::Cow::Owned(self.query.key_expr().as_str().to_owned()), - mapping: Mapping::Sender, - }, - payload: ResponseBody::Err(zenoh::Err { - timestamp: None, - is_infrastructure: false, - ext_sinfo: None, + SampleKind::Delete => ReplyBody::Del(Del { + timestamp: self.timestamp, + ext_sinfo, + #[cfg(feature = "unstable")] + ext_attachment: self.attachment.map(|a| a.into()), + #[cfg(not(feature = "unstable"))] + ext_attachment: None, ext_unknown: vec![], - ext_body: Some(ValueType { - #[cfg(feature = "shared-memory")] 
- ext_shm: None, - payload: payload.payload.into(), - encoding: payload.encoding.into(), - }), - code: 0, // TODO }), - ext_qos: response::ext::QoSType::RESPONSE, - ext_tstamp: None, - ext_respid: Some(response::ext::ResponderIdType { - zid: self.query.inner.zid, - eid: self.query.eid, - }), - }); - Ok(()) - } - } + }, + }), + ext_qos: self.qos.into(), + ext_tstamp: None, + ext_respid: Some(response::ext::ResponderIdType { + zid: self.query.inner.zid, + eid: self.query.eid, + }), + }); + Ok(()) } } @@ -301,6 +366,50 @@ impl<'a> AsyncResolve for ReplyBuilder<'a> { } } +impl<'a> Resolvable for ReplyErrBuilder<'a> { + type To = ZResult<()>; +} + +impl SyncResolve for ReplyErrBuilder<'_> { + fn res_sync(self) -> ::To { + self.query.inner.primitives.send_response(Response { + rid: self.query.inner.qid, + wire_expr: WireExpr { + scope: 0, + suffix: std::borrow::Cow::Owned(self.query.key_expr().as_str().to_owned()), + mapping: Mapping::Sender, + }, + payload: ResponseBody::Err(zenoh::Err { + timestamp: None, + is_infrastructure: false, + ext_sinfo: None, + ext_unknown: vec![], + ext_body: Some(ValueType { + #[cfg(feature = "shared-memory")] + ext_shm: None, + payload: self.value.payload.into(), + encoding: self.value.encoding.into(), + }), + code: 0, // TODO + }), + ext_qos: response::ext::QoSType::RESPONSE, + ext_tstamp: None, + ext_respid: Some(response::ext::ResponderIdType { + zid: self.query.inner.zid, + eid: self.query.eid, + }), + }); + Ok(()) + } +} +impl<'a> AsyncResolve for ReplyErrBuilder<'a> { + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} + pub(crate) struct QueryableState { pub(crate) id: Id, pub(crate) key_expr: WireExpr<'static>, @@ -338,7 +447,7 @@ impl fmt::Debug for QueryableState { /// let queryable = session.declare_queryable("key/expression").res().await.unwrap(); /// while let Ok(query) = queryable.recv_async().await { /// println!(">> Handling query '{}'", query.selector()); -/// query.reply(Ok(Sample::try_from("key/expression", "value").unwrap())) +/// query.reply(KeyExpr::try_from("key/expression").unwrap(), "value") /// .res() /// .await /// .unwrap(); @@ -576,7 +685,7 @@ impl<'a, 'b, Handler> QueryableBuilder<'a, 'b, Handler> { /// .unwrap(); /// while let Ok(query) = queryable.recv_async().await { /// println!(">> Handling query '{}'", query.selector()); -/// query.reply(Ok(Sample::try_from("key/expression", "value").unwrap())) +/// query.reply(KeyExpr::try_from("key/expression").unwrap(), "value") /// .res() /// .await /// .unwrap(); diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index af4a58956d..9c68b460d9 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -585,3 +585,9 @@ impl From for QoS { QoS { inner: qos } } } + +impl From for QoSType { + fn from(qos: QoS) -> Self { + qos.inner + } +} diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 861acf71de..ba67e173bd 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -1852,10 +1852,10 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// .unwrap(); /// async_std::task::spawn(async move { /// while let Ok(query) = queryable.recv_async().await { - /// query.reply(Ok(Sample::try_from( - /// "key/expression", + /// query.reply( + /// KeyExpr::try_from("key/expression").unwrap(), /// "value", - /// ).unwrap())).res().await.unwrap(); + /// ).res().await.unwrap(); /// } /// }).await; /// # }) @@ -2481,10 +2481,10 @@ pub trait SessionDeclarations<'s, 'a> { /// .unwrap(); /// async_std::task::spawn(async move { /// 
while let Ok(query) = queryable.recv_async().await { - /// query.reply(Ok(Sample::try_from( - /// "key/expression", + /// query.reply( + /// KeyExpr::try_from("key/expression").unwrap(), /// "value", - /// ).unwrap())).res().await.unwrap(); + /// ).res().await.unwrap(); /// } /// }).await; /// # }) diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 89dd3e231f..0e7c1c0de7 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -73,11 +73,11 @@ fn queries() { attachment.insert(&k, &k); } query - .reply(Ok(Sample::new( + .reply( query.key_expr().clone(), query.value().unwrap().payload.clone(), ) - .with_attachment(attachment))) + .with_attachment(attachment) .res() .unwrap(); }) diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 5c96f080f8..82053b4f1d 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -115,12 +115,12 @@ impl Task { // The Queryable task keeps replying to requested messages until all checkpoints are finished. Self::Queryable(ke, payload_size) => { let queryable = session.declare_queryable(ke).res_async().await?; - let sample = Sample::try_from(ke.clone(), vec![0u8; *payload_size])?; + let payload = vec![0u8; *payload_size]; loop { futures::select! { query = queryable.recv_async() => { - query?.reply(Ok(sample.clone())).res_async().await?; + query?.reply(KeyExpr::try_from(ke.to_owned())?, payload.clone()).res_async().await?; }, _ = async_std::task::sleep(Duration::from_millis(100)).fuse() => { diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 8a3f4381d2..077c58298d 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -157,23 +157,28 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re c_msgs.fetch_add(1, Ordering::Relaxed); match query.parameters() { "ok_put" => { - let mut rep = Sample::try_from(key_expr, vec![0u8; size]).unwrap(); - rep.kind = SampleKind::Put; task::block_on(async { - ztimeout!(query.reply(Ok(rep)).res_async()).unwrap() + ztimeout!(query + .reply( + KeyExpr::try_from(key_expr).unwrap(), + vec![0u8; size].to_vec() + ) + .res_async()) + .unwrap() }); } "ok_del" => { - let mut rep = Sample::try_from(key_expr, vec![0u8; size]).unwrap(); - rep.kind = SampleKind::Delete; task::block_on(async { - ztimeout!(query.reply(Ok(rep)).res_async()).unwrap() + ztimeout!(query + .reply_del(KeyExpr::try_from(key_expr).unwrap()) + .res_async()) + .unwrap() }); } "err" => { let rep = Value::from(vec![0u8; size]); task::block_on(async { - ztimeout!(query.reply(Err(rep)).res_async()).unwrap() + ztimeout!(query.reply_err(rep).res_async()).unwrap() }); } _ => panic!("Unknown query parameter"), diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index 76910ee5de..def0dffe33 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -196,8 +196,12 @@ async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) { .declare_queryable(key_expr) .callback(move |sample| { c_msgs1.fetch_add(1, Ordering::Relaxed); - let rep = Sample::try_from(key_expr, vec![0u8; size]).unwrap(); - task::block_on(async { ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() }); + task::block_on(async { + ztimeout!(sample + .reply(KeyExpr::try_from(key_expr).unwrap(), vec![0u8; size]) + .res_async()) + .unwrap() + }); }) .res_async()) .unwrap(); @@ -209,8 +213,12 @@ async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) { .declare_queryable(key_expr) .callback(move |sample| { c_msgs2.fetch_add(1, Ordering::Relaxed); - let rep 
= Sample::try_from(key_expr, vec![0u8; size]).unwrap(); - task::block_on(async { ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() }); + task::block_on(async { + ztimeout!(sample + .reply(KeyExpr::try_from(key_expr).unwrap(), vec![0u8; size]) + .res_async()) + .unwrap() + }); }) .res_async()) .unwrap(); From e06b46d4e39b723fb17f9cf6015e07c58b2ec710 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 13 Mar 2024 10:03:45 +0100 Subject: [PATCH 011/357] Simplify Error message (#813) --- commons/zenoh-codec/src/zenoh/err.rs | 57 +++++++++------------ commons/zenoh-protocol/src/zenoh/err.rs | 52 +++++++------------ io/zenoh-transport/src/shm.rs | 28 ++-------- zenoh/src/net/routing/dispatcher/queries.rs | 2 +- zenoh/src/queryable.rs | 19 +++---- zenoh/src/session.rs | 12 ++--- 6 files changed, 56 insertions(+), 114 deletions(-) diff --git a/commons/zenoh-codec/src/zenoh/err.rs b/commons/zenoh-codec/src/zenoh/err.rs index 5cef1a6389..b459f67b3f 100644 --- a/commons/zenoh-codec/src/zenoh/err.rs +++ b/commons/zenoh-codec/src/zenoh/err.rs @@ -11,14 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Header}; use alloc::vec::Vec; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, + ZBuf, }; use zenoh_protocol::{ common::{iext, imsg}, + core::Encoding, zenoh::{ err::{ext, flag, Err}, id, @@ -33,33 +35,26 @@ where fn write(self, writer: &mut W, x: &Err) -> Self::Output { let Err { - code, - is_infrastructure, - timestamp, + encoding, ext_sinfo, - ext_body, ext_unknown, + payload, } = x; // Header let mut header = id::ERR; - if timestamp.is_some() { - header |= flag::T; + if encoding != &Encoding::empty() { + header |= flag::E; } - if *is_infrastructure { - header |= flag::I; - } - let mut n_exts = - (ext_sinfo.is_some() as u8) + (ext_body.is_some() as u8) + (ext_unknown.len() as u8); + let mut n_exts = (ext_sinfo.is_some() as u8) + (ext_unknown.len() as u8); if n_exts != 0 { header |= flag::Z; } self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, code)?; - if let Some(ts) = timestamp.as_ref() { - self.write(&mut *writer, ts)?; + if encoding != &Encoding::empty() { + self.write(&mut *writer, encoding)?; } // Extensions @@ -67,15 +62,15 @@ where n_exts -= 1; self.write(&mut *writer, (sinfo, n_exts != 0))?; } - if let Some(body) = ext_body.as_ref() { - n_exts -= 1; - self.write(&mut *writer, (body, n_exts != 0))?; - } for u in ext_unknown.iter() { n_exts -= 1; self.write(&mut *writer, (u, n_exts != 0))?; } + // Payload + let bodec = Zenoh080Bounded::::new(); + bodec.write(&mut *writer, payload)?; + Ok(()) } } @@ -105,16 +100,13 @@ where } // Body - let code: u16 = self.codec.read(&mut *reader)?; - let is_infrastructure = imsg::has_flag(self.header, flag::I); - let mut timestamp: Option = None; - if imsg::has_flag(self.header, flag::T) { - timestamp = Some(self.codec.read(&mut *reader)?); + let mut encoding = Encoding::empty(); + if imsg::has_flag(self.header, flag::E) { + encoding = self.codec.read(&mut *reader)?; } // Extensions let mut ext_sinfo: Option = None; - let mut ext_body: Option = None; let mut ext_unknown = Vec::new(); let mut has_ext = imsg::has_flag(self.header, flag::Z); @@ -127,11 +119,6 @@ where ext_sinfo = Some(s); has_ext = ext; } - ext::ErrBodyType::VID | ext::ErrBodyType::SID => { - let (s, ext): (ext::ErrBodyType, bool) = eodec.read(&mut *reader)?; - ext_body = 
Some(s); - has_ext = ext; - } _ => { let (u, ext) = extension::read(reader, "Err", ext)?; ext_unknown.push(u); @@ -140,13 +127,15 @@ where } } + // Payload + let bodec = Zenoh080Bounded::::new(); + let payload: ZBuf = bodec.read(&mut *reader)?; + Ok(Err { - code, - is_infrastructure, - timestamp, + encoding, ext_sinfo, - ext_body, ext_unknown, + payload, }) } } diff --git a/commons/zenoh-protocol/src/zenoh/err.rs b/commons/zenoh-protocol/src/zenoh/err.rs index 648efff441..eacbb26596 100644 --- a/commons/zenoh-protocol/src/zenoh/err.rs +++ b/commons/zenoh-protocol/src/zenoh/err.rs @@ -11,43 +11,41 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::common::ZExtUnknown; +use crate::{common::ZExtUnknown, core::Encoding}; use alloc::vec::Vec; -use uhlc::Timestamp; +use zenoh_buffers::ZBuf; /// # Err message /// /// ```text /// Flags: -/// - T: Timestamp If T==1 then the timestamp if present -/// - I: Infrastructure If I==1 then the error is related to the infrastructure else to the user +/// - X: Reserved +/// - E: Encoding If E==1 then the encoding is present /// - Z: Extension If Z==1 then at least one extension is present /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ -/// |Z|I|T| ERR | +/// |Z|E|X| ERR | /// +-+-+-+---------+ -/// % code:z16 % -/// +---------------+ -/// ~ ts: ~ if T==1 +/// ~ encoding ~ if E==1 /// +---------------+ /// ~ [err_exts] ~ if Z==1 /// +---------------+ +/// ~ pl: ~ -- Payload +/// +---------------+ /// ``` pub mod flag { - pub const T: u8 = 1 << 5; // 0x20 Timestamp if T==0 then the timestamp if present - pub const I: u8 = 1 << 6; // 0x40 Infrastructure if I==1 then the error is related to the infrastructure else to the user + // pub const X: u8 = 1 << 5; // 0x20 Reserved + pub const E: u8 = 1 << 6; // 0x40 Encoding if E==1 then the encoding is present pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow } #[derive(Debug, Clone, PartialEq, Eq)] pub struct Err { - pub code: u16, - pub is_infrastructure: bool, - pub timestamp: Option, + pub encoding: Encoding, pub ext_sinfo: Option, - pub ext_body: Option, pub ext_unknown: Vec, + pub payload: ZBuf, } pub mod ext { @@ -57,45 +55,31 @@ pub mod ext { /// Used to carry additional information about the source of data pub type SourceInfo = zextzbuf!(0x1, false); pub type SourceInfoType = crate::zenoh::ext::SourceInfoType<{ SourceInfo::ID }>; - - /// # ErrBody extension - /// Used to carry a body attached to the query - /// Shared Memory extension is automatically defined by ValueType extension if - /// #[cfg(feature = "shared-memory")] is defined. 
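[Editor's note] For reference, a sketch of the simplified Err message defined above: the error payload now travels directly in the message body and the encoding is written only when the E flag is set (the former code, is_infrastructure and timestamp fields are gone). Field names follow this hunk; `ZBuf::empty()` from zenoh-buffers is assumed:

    use zenoh_buffers::ZBuf;
    use zenoh_protocol::{core::Encoding, zenoh::err::Err};

    fn main() {
        // An error reply with no declared encoding and an empty payload:
        // the E flag stays unset, so only the payload follows the header and extensions.
        let err = Err {
            encoding: Encoding::empty(),
            ext_sinfo: None,
            ext_unknown: vec![],
            payload: ZBuf::empty(),
        };
        println!("{:?}", err);
    }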
- pub type ErrBodyType = crate::zenoh::ext::ValueType<{ ZExtZBuf::<0x02>::id(false) }, 0x03>; } impl Err { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::{common::iext, core::ZenohId}; + use crate::common::iext; use rand::Rng; let mut rng = rand::thread_rng(); - let code: u16 = rng.gen(); - let is_infrastructure = rng.gen_bool(0.5); - let timestamp = rng.gen_bool(0.5).then_some({ - let time = uhlc::NTP64(rng.gen()); - let id = uhlc::ID::try_from(ZenohId::rand().to_le_bytes()).unwrap(); - Timestamp::new(time, id) - }); + let encoding = Encoding::rand(); let ext_sinfo = rng.gen_bool(0.5).then_some(ext::SourceInfoType::rand()); - let ext_body = rng.gen_bool(0.5).then_some(ext::ErrBodyType::rand()); let mut ext_unknown = Vec::new(); for _ in 0..rng.gen_range(0..4) { ext_unknown.push(ZExtUnknown::rand2( - iext::mid(ext::ErrBodyType::SID) + 1, + iext::mid(ext::SourceInfo::ID) + 1, false, )); } + let payload = ZBuf::rand(rng.gen_range(0..=64)); Self { - code, - is_infrastructure, - timestamp, + encoding, ext_sinfo, - ext_body, ext_unknown, + payload, } } } diff --git a/io/zenoh-transport/src/shm.rs b/io/zenoh-transport/src/shm.rs index 6f98cafc14..31910f51ae 100644 --- a/io/zenoh-transport/src/shm.rs +++ b/io/zenoh-transport/src/shm.rs @@ -18,7 +18,7 @@ use zenoh_core::{zasyncread, zasyncwrite, zerror}; use zenoh_protocol::{ network::{NetworkBody, NetworkMessage, Push, Request, Response}, zenoh::{ - err::{ext::ErrBodyType, Err}, + err::Err, ext::ShmType, query::{ext::QueryBodyType, Query}, reply::ReplyBody, @@ -123,31 +123,11 @@ impl MapShm for Reply { // Impl - Err impl MapShm for Err { fn map_to_shminfo(&mut self) -> ZResult { - if let Self { - ext_body: Some(ErrBodyType { - payload, ext_shm, .. - }), - .. - } = self - { - map_to_shminfo!(payload, ext_shm) - } else { - Ok(false) - } + Ok(false) } - fn map_to_shmbuf(&mut self, shmr: &RwLock) -> ZResult { - if let Self { - ext_body: Some(ErrBodyType { - payload, ext_shm, .. - }), - .. - } = self - { - map_to_shmbuf!(payload, ext_shm, shmr) - } else { - Ok(false) - } + fn map_to_shmbuf(&mut self, _shmr: &RwLock) -> ZResult { + Ok(false) } } diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 287621151a..721a98b8c2 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -521,7 +521,7 @@ macro_rules! 
inc_res_stats { ResponseBody::Err(e) => { stats.[<$txrx _z_reply_msgs>].[](1); stats.[<$txrx _z_reply_pl_bytes>].[]( - e.ext_body.as_ref().map(|b| b.payload.len()).unwrap_or(0), + e.payload.len() ); } } diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index ed3bd63b6a..d98df046b7 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -31,9 +31,11 @@ use std::ops::Deref; use std::sync::Arc; use uhlc::Timestamp; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; -use zenoh_protocol::core::{EntityId, WireExpr}; -use zenoh_protocol::network::{response, Mapping, RequestId, Response, ResponseFinal}; -use zenoh_protocol::zenoh::{self, ext::ValueType, reply::ReplyBody, Del, Put, ResponseBody}; +use zenoh_protocol::{ + core::{EntityId, WireExpr}, + network::{response, Mapping, RequestId, Response, ResponseFinal}, + zenoh::{self, reply::ReplyBody, Del, Put, ResponseBody}, +}; use zenoh_result::ZResult; pub(crate) struct QueryInner { @@ -380,17 +382,10 @@ impl SyncResolve for ReplyErrBuilder<'_> { mapping: Mapping::Sender, }, payload: ResponseBody::Err(zenoh::Err { - timestamp: None, - is_infrastructure: false, + encoding: self.value.encoding.into(), ext_sinfo: None, ext_unknown: vec![], - ext_body: Some(ValueType { - #[cfg(feature = "shared-memory")] - ext_shm: None, - payload: self.value.payload.into(), - encoding: self.value.encoding.into(), - }), - code: 0, // TODO + payload: self.value.payload.into(), }), ext_qos: response::ext::QoSType::RESPONSE, ext_tstamp: None, diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index ba67e173bd..4c303ae974 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -2128,15 +2128,9 @@ impl Primitives for Session { Some(query) => { let callback = query.callback.clone(); std::mem::drop(state); - let value = match e.ext_body { - Some(body) => Value { - payload: body.payload.into(), - encoding: body.encoding.into(), - }, - None => Value { - payload: Payload::empty(), - encoding: Encoding::default(), - }, + let value = Value { + payload: e.payload.into(), + encoding: e.encoding.into(), }; let replier_id = match e.ext_sinfo { Some(info) => info.id.zid, From 55119a5001f0080bfd78a91733760a45a959674c Mon Sep 17 00:00:00 2001 From: Denis Biryukov Date: Wed, 13 Mar 2024 15:07:59 +0100 Subject: [PATCH 012/357] make Sample ields pub(crate) provide accessors for external users --- examples/examples/z_get.rs | 4 +- examples/examples/z_pong.rs | 2 +- examples/examples/z_pull.rs | 6 +- examples/examples/z_storage.rs | 15 ++- examples/examples/z_sub.rs | 4 +- plugins/zenoh-plugin-example/src/lib.rs | 6 +- plugins/zenoh-plugin-rest/src/lib.rs | 22 ++--- .../src/replica/align_queryable.rs | 10 +- .../src/replica/aligner.rs | 16 ++-- .../src/replica/mod.rs | 11 ++- .../src/replica/storage.rs | 84 +++++++++-------- .../tests/operations.rs | 8 +- .../tests/wildcard.rs | 20 ++-- zenoh-ext/examples/z_query_sub.rs | 4 +- zenoh-ext/src/group.rs | 4 +- zenoh-ext/src/publication_cache.rs | 8 +- zenoh-ext/src/querying_subscriber.rs | 4 +- zenoh/src/liveliness.rs | 14 +-- zenoh/src/payload.rs | 4 +- zenoh/src/sample.rs | 93 +++++++++++++------ zenoh/src/subscriber.rs | 12 +-- zenoh/tests/attachments.rs | 4 +- zenoh/tests/events.rs | 20 ++-- zenoh/tests/interceptors.rs | 6 +- zenoh/tests/liveliness.rs | 8 +- zenoh/tests/qos.rs | 4 +- zenoh/tests/routing.rs | 4 +- zenoh/tests/session.rs | 10 +- zenoh/tests/unicity.rs | 6 +- 29 files changed, 224 insertions(+), 189 deletions(-) diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 
0fff95c250..dce74d367b 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -41,12 +41,12 @@ async fn main() { match reply.sample { Ok(sample) => { let payload = sample - .payload + .payload() .deserialize::() .unwrap_or_else(|e| format!("{}", e)); println!( ">> Received ('{}': '{}')", - sample.key_expr.as_str(), + sample.key_expr().as_str(), payload, ); } diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index 1f06c7abb9..6c333cbbeb 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -41,7 +41,7 @@ fn main() { let _sub = session .declare_subscriber(key_expr_ping) - .callback(move |sample| publisher.put(sample.payload).res().unwrap()) + .callback(move |sample| publisher.put(sample.payload().clone()).res().unwrap()) .res() .unwrap(); for _ in stdin().bytes().take_while(|b| !matches!(b, Ok(b'q'))) {} diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index ed2a90f1a6..5ba4f413bd 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -45,13 +45,13 @@ async fn main() { let subs = async { while let Ok(sample) = subscriber.recv_async().await { let payload = sample - .payload + .payload() .deserialize::() .unwrap_or_else(|e| format!("{}", e)); println!( ">> [Subscriber] Received {} ('{}': '{}')", - sample.kind, - sample.key_expr.as_str(), + sample.kind(), + sample.key_expr().as_str(), payload, ); } diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index 857181751b..ab62785f18 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -53,13 +53,12 @@ async fn main() { select!( sample = subscriber.recv_async() => { let sample = sample.unwrap(); - let payload = sample.payload.deserialize::().unwrap_or_else(|e| format!("{}", e)); - println!(">> [Subscriber] Received {} ('{}': '{}')", sample.kind, sample.key_expr.as_str(),payload); - if sample.kind == SampleKind::Delete { - stored.remove(&sample.key_expr.to_string()); - } else { - stored.insert(sample.key_expr.to_string(), sample); - } + let payload = sample.payload().deserialize::().unwrap_or_else(|e| format!("{}", e)); + println!(">> [Subscriber] Received {} ('{}': '{}')", sample.kind(), sample.key_expr().as_str(),payload); + match sample.kind() { + SampleKind::Delete => stored.remove(&sample.key_expr().to_string()), + SampleKind::Put => stored.insert(sample.key_expr().to_string(), sample), + }; }, query = queryable.recv_async() => { @@ -67,7 +66,7 @@ async fn main() { println!(">> [Queryable ] Received Query '{}'", query.selector()); for (stored_name, sample) in stored.iter() { if query.selector().key_expr.intersects(unsafe {keyexpr::from_str_unchecked(stored_name)}) { - query.reply(sample.key_expr.clone(), sample.payload.clone()).res().await.unwrap(); + query.reply(sample.key_expr().clone(), sample.payload().clone()).res().await.unwrap(); } } }, diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index 195e2f7640..f2d337a7cf 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -46,8 +46,8 @@ async fn main() { select!( sample = subscriber.recv_async() => { let sample = sample.unwrap(); - let payload = sample.payload.deserialize::().unwrap_or_else(|e| format!("{}", e)); - println!(">> [Subscriber] Received {} ('{}': '{}')", sample.kind, sample.key_expr.as_str(), payload); + let payload = sample.payload().deserialize::().unwrap_or_else(|e| format!("{}", e)); + println!(">> [Subscriber] Received {} ('{}': '{}')", sample.kind(), 
sample.key_expr().as_str(), payload); }, _ = stdin.read_exact(&mut input).fuse() => { match input[0] { diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 12cc6ffa84..04f49b4739 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -164,9 +164,9 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { // on sample received by the Subscriber sample = sub.recv_async() => { let sample = sample.unwrap(); - let payload = sample.payload.deserialize::().unwrap_or_else(|e| format!("{}", e)); - info!("Received data ('{}': '{}')", sample.key_expr, payload); - stored.insert(sample.key_expr.to_string(), sample); + let payload = sample.payload().deserialize::().unwrap_or_else(|e| format!("{}", e)); + info!("Received data ('{}': '{}')", sample.key_expr(), payload); + stored.insert(sample.key_expr().to_string(), sample); }, // on query received by the Queryable query = queryable.recv_async() => { diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 1a99d7b5a4..c689bc7d7d 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -46,7 +46,7 @@ lazy_static::lazy_static! { } const RAW_KEY: &str = "_raw"; -fn payload_to_json(payload: Payload) -> String { +fn payload_to_json(payload: &Payload) -> String { payload .deserialize::() .unwrap_or_else(|_| format!(r#""{}""#, b64_std_engine.encode(payload.contiguous()))) @@ -55,10 +55,10 @@ fn payload_to_json(payload: Payload) -> String { fn sample_to_json(sample: Sample) -> String { format!( r#"{{ "key": "{}", "value": {}, "encoding": "{}", "time": "{}" }}"#, - sample.key_expr.as_str(), - payload_to_json(sample.payload), - sample.encoding, - if let Some(ts) = sample.timestamp { + sample.key_expr().as_str(), + payload_to_json(sample.payload()), + sample.encoding(), + if let Some(ts) = sample.timestamp() { ts.to_string() } else { "None".to_string() @@ -72,7 +72,7 @@ fn result_to_json(sample: Result) -> String { Err(err) => { format!( r#"{{ "key": "ERROR", "value": {}, "encoding": "{}"}}"#, - payload_to_json(err.payload), + payload_to_json(&err.payload), err.encoding, ) } @@ -100,8 +100,8 @@ async fn to_json_response(results: flume::Receiver) -> Response { fn sample_to_html(sample: Sample) -> String { format!( "
<dt>{}</dt>\n<dd>{}</dd>
\n", - sample.key_expr.as_str(), - String::from_utf8_lossy(&sample.payload.contiguous()) + sample.key_expr().as_str(), + String::from_utf8_lossy(&sample.payload().contiguous()) ) } @@ -136,8 +136,8 @@ async fn to_raw_response(results: flume::Receiver) -> Response { Ok(reply) => match reply.sample { Ok(sample) => response( StatusCode::Ok, - Cow::from(&sample.encoding).as_ref(), - String::from_utf8_lossy(&sample.payload.contiguous()).as_ref(), + Cow::from(sample.encoding()).as_ref(), + String::from_utf8_lossy(&sample.payload().contiguous()).as_ref(), ), Err(value) => response( StatusCode::Ok, @@ -322,7 +322,7 @@ async fn query(mut req: Request<(Arc, String)>) -> tide::Result { log::trace!( "[ALIGN QUERYABLE] Received ('{}': '{}')", - sample.key_expr.as_str(), - StringOrBase64::from(sample.payload.clone()) + sample.key_expr().as_str(), + StringOrBase64::from(sample.payload()) ); - if let Some(timestamp) = sample.timestamp { + if let Some(timestamp) = sample.timestamp() { match timestamp.cmp(&logentry.timestamp) { Ordering::Greater => return None, Ordering::Less => { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 03c6fa949a..b11a94e4f2 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -141,10 +141,10 @@ impl Aligner { for sample in replies { result.insert( - sample.key_expr.into(), + sample.key_expr().clone().into(), ( - sample.timestamp.unwrap(), - Value::new(sample.payload).with_encoding(sample.encoding), + sample.timestamp().unwrap().clone(), + Value::from(sample), ), ); } @@ -213,7 +213,7 @@ impl Aligner { let mut other_intervals: HashMap = HashMap::new(); // expecting sample.payload to be a vec of intervals with their checksum for each in reply_content { - match serde_json::from_str(&StringOrBase64::from(each.payload)) { + match serde_json::from_str(&StringOrBase64::from(each.payload())) { Ok((i, c)) => { other_intervals.insert(i, c); } @@ -259,7 +259,7 @@ impl Aligner { let (reply_content, mut no_err) = self.perform_query(other_rep, properties).await; let mut other_subintervals: HashMap = HashMap::new(); for each in reply_content { - match serde_json::from_str(&StringOrBase64::from(each.payload)) { + match serde_json::from_str(&StringOrBase64::from(each.payload())) { Ok((i, c)) => { other_subintervals.insert(i, c); } @@ -300,7 +300,7 @@ impl Aligner { let (reply_content, mut no_err) = self.perform_query(other_rep, properties).await; let mut other_content: HashMap> = HashMap::new(); for each in reply_content { - match serde_json::from_str(&StringOrBase64::from(each.payload)) { + match serde_json::from_str(&StringOrBase64::from(each.payload())) { Ok((i, c)) => { other_content.insert(i, c); } @@ -340,8 +340,8 @@ impl Aligner { Ok(sample) => { log::trace!( "[ALIGNER] Received ('{}': '{}')", - sample.key_expr.as_str(), - StringOrBase64::from(sample.payload.clone()) + sample.key_expr().as_str(), + StringOrBase64::from(sample.payload()) ); return_val.push(sample); } diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index 78254213f7..5dda032029 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -220,16 +220,17 @@ impl Replica { continue; } }; - let from = &sample.key_expr.as_str() + let from = &sample.key_expr().as_str() 
[Replica::get_digest_key(&self.key_expr, ALIGN_PREFIX).len() + 1..]; log::trace!( "[DIGEST_SUB] From {} Received {} ('{}': '{}')", from, - sample.kind, - sample.key_expr.as_str(), - StringOrBase64::from(sample.payload.clone()) + sample.kind(), + sample.key_expr().as_str(), + StringOrBase64::from(sample.payload()) ); - let digest: Digest = match serde_json::from_str(&StringOrBase64::from(sample.payload)) { + let digest: Digest = match serde_json::from_str(&StringOrBase64::from(sample.payload())) + { Ok(digest) => digest, Err(e) => { log::error!("[DIGEST_SUB] Error in decoding the digest: {}", e); diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 6b48895612..895f2e1914 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -179,7 +179,7 @@ impl StorageService { }; // log error if the sample is not timestamped // This is to reduce down the line inconsistencies of having duplicate samples stored - if sample.get_timestamp().is_none() { + if sample.timestamp().is_none() { log::error!("Sample {:?} is not timestamped. Please timestamp samples meant for replicated storage.", sample); } else { @@ -271,28 +271,28 @@ impl StorageService { }; // if wildcard, update wildcard_updates - if sample.key_expr.is_wild() { + if sample.key_expr().is_wild() { self.register_wildcard_update(sample.clone()).await; } - let matching_keys = if sample.key_expr.is_wild() { - self.get_matching_keys(&sample.key_expr).await + let matching_keys = if sample.key_expr().is_wild() { + self.get_matching_keys(&sample.key_expr()).await } else { - vec![sample.key_expr.clone().into()] + vec![sample.key_expr().clone().into()] }; log::trace!( "The list of keys matching `{}` is : {:?}", - sample.key_expr, + sample.key_expr(), matching_keys ); for k in matching_keys { if !self - .is_deleted(&k.clone(), sample.get_timestamp().unwrap()) + .is_deleted(&k.clone(), sample.timestamp().unwrap()) .await && (self.capability.history.eq(&History::All) || (self.capability.history.eq(&History::Latest) - && self.is_latest(&k, sample.get_timestamp().unwrap()).await)) + && self.is_latest(&k, sample.timestamp().unwrap()).await)) { log::trace!( "Sample `{:?}` identified as neded processing for key {}", @@ -302,30 +302,30 @@ impl StorageService { // there might be the case that the actual update was outdated due to a wild card update, but not stored yet in the storage. // get the relevant wild card entry and use that value and timestamp to update the storage let sample_to_store = match self - .ovderriding_wild_update(&k, sample.get_timestamp().unwrap()) + .ovderriding_wild_update(&k, sample.timestamp().unwrap()) .await { Some(overriding_update) => { let Value { payload, encoding, .. 
} = overriding_update.data.value; - let mut sample_to_store = Sample::new(KeyExpr::from(k.clone()), payload) + let sample_to_store = Sample::new(KeyExpr::from(k.clone()), payload) .with_encoding(encoding) - .with_timestamp(overriding_update.data.timestamp); - sample_to_store.kind = overriding_update.kind; + .with_timestamp(overriding_update.data.timestamp) + .with_kind(overriding_update.kind); sample_to_store } None => { - let mut sample_to_store = - Sample::new(KeyExpr::from(k.clone()), sample.payload.clone()) - .with_encoding(sample.encoding.clone()) - .with_timestamp(sample.timestamp.unwrap()); - sample_to_store.kind = sample.kind; + let sample_to_store = + Sample::new(KeyExpr::from(k.clone()), sample.payload().clone()) + .with_encoding(sample.encoding().clone()) + .with_timestamp(sample.timestamp().unwrap().clone()) + .with_kind(sample.kind()); sample_to_store } }; - let stripped_key = match self.strip_prefix(&sample_to_store.key_expr) { + let stripped_key = match self.strip_prefix(sample_to_store.key_expr()) { Ok(stripped) => stripped, Err(e) => { log::error!("{}", e); @@ -333,24 +333,25 @@ impl StorageService { } }; let mut storage = self.storage.lock().await; - let result = if sample.kind == SampleKind::Put { - storage - .put( - stripped_key, - Value::new(sample_to_store.payload.clone()) - .with_encoding(sample_to_store.encoding.clone()), - sample_to_store.timestamp.unwrap(), - ) - .await - } else if sample.kind == SampleKind::Delete { - // register a tombstone - self.mark_tombstone(&k, sample_to_store.timestamp.unwrap()) - .await; - storage - .delete(stripped_key, sample_to_store.timestamp.unwrap()) - .await - } else { - Err("sample kind not implemented".into()) + let result = match sample.kind() { + SampleKind::Put => { + storage + .put( + stripped_key, + Value::new(sample_to_store.payload().clone()) + .with_encoding(sample_to_store.encoding().clone()), + sample_to_store.timestamp().unwrap().clone(), + ) + .await + } + SampleKind::Delete => { + // register a tombstone + self.mark_tombstone(&k, sample_to_store.timestamp().unwrap().clone()) + .await; + storage + .delete(stripped_key, sample_to_store.timestamp().unwrap().clone()) + .await + } }; drop(storage); if self.replication.is_some() @@ -362,7 +363,7 @@ impl StorageService { .as_ref() .unwrap() .log_propagation - .send((k.clone(), *sample_to_store.get_timestamp().unwrap())); + .send((k.clone(), sample_to_store.timestamp().unwrap().clone())); match sending { Ok(_) => (), Err(e) => { @@ -395,15 +396,16 @@ impl StorageService { async fn register_wildcard_update(&self, sample: Sample) { // @TODO: change into a better store that does incremental writes - let key = sample.clone().key_expr; + let key = sample.key_expr().clone(); let mut wildcards = self.wildcard_updates.write().await; + let timestamp = sample.timestamp().unwrap().clone(); wildcards.insert( &key, Update { - kind: sample.kind, + kind: sample.kind(), data: StoredData { - value: Value::new(sample.payload).with_encoding(sample.encoding), - timestamp: sample.timestamp.unwrap(), + value: Value::from(sample), + timestamp, }, }, ); diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index 81029e2fa7..36162f01c2 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -101,7 +101,7 @@ async fn test_updates_in_order() { // expects exactly one sample let data = get_data(&session, "operation/test/a").await; 
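    // [Editor's note, not part of the patch] The hunks of this commit all apply the same
    // migration: Sample fields become pub(crate), and callers read them through accessors
    // such as sample.key_expr(), sample.payload(), sample.kind(), sample.encoding(),
    // sample.timestamp() (formerly get_timestamp()) and sample.qos(), while builder-style
    // setters like with_kind() and with_timestamp() remain for constructing samples.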
assert_eq!(data.len(), 1); - assert_eq!(StringOrBase64::from(data[0].payload.clone()).as_str(), "1"); + assert_eq!(StringOrBase64::from(data[0].payload()).as_str(), "1"); put_data( &session, @@ -117,7 +117,7 @@ async fn test_updates_in_order() { // expects exactly one sample let data = get_data(&session, "operation/test/b").await; assert_eq!(data.len(), 1); - assert_eq!(StringOrBase64::from(data[0].payload.clone()).as_str(), "2"); + assert_eq!(StringOrBase64::from(data[0].payload()).as_str(), "2"); delete_data( &session, @@ -136,8 +136,8 @@ async fn test_updates_in_order() { // expects exactly one sample let data = get_data(&session, "operation/test/b").await; assert_eq!(data.len(), 1); - assert_eq!(StringOrBase64::from(data[0].payload.clone()).as_str(), "2"); - assert_eq!(data[0].key_expr.as_str(), "operation/test/b"); + assert_eq!(StringOrBase64::from(data[0].payload()).as_str(), "2"); + assert_eq!(data[0].key_expr().as_str(), "operation/test/b"); drop(storage); } diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index 4808ec246f..5a71dc23f0 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -117,8 +117,8 @@ async fn test_wild_card_in_order() { // expected single entry let data = get_data(&session, "wild/test/*").await; assert_eq!(data.len(), 1); - assert_eq!(data[0].key_expr.as_str(), "wild/test/a"); - assert_eq!(StringOrBase64::from(data[0].payload.clone()).as_str(), "2"); + assert_eq!(data[0].key_expr().as_str(), "wild/test/a"); + assert_eq!(StringOrBase64::from(data[0].payload()).as_str(), "2"); put_data( &session, @@ -134,10 +134,10 @@ async fn test_wild_card_in_order() { // expected two entries let data = get_data(&session, "wild/test/*").await; assert_eq!(data.len(), 2); - assert!(["wild/test/a", "wild/test/b"].contains(&data[0].key_expr.as_str())); - assert!(["wild/test/a", "wild/test/b"].contains(&data[1].key_expr.as_str())); - assert!(["2", "3"].contains(&StringOrBase64::from(data[0].payload.clone()).as_str())); - assert!(["2", "3"].contains(&StringOrBase64::from(data[1].payload.clone()).as_str())); + assert!(["wild/test/a", "wild/test/b"].contains(&data[0].key_expr().as_str())); + assert!(["wild/test/a", "wild/test/b"].contains(&data[1].key_expr().as_str())); + assert!(["2", "3"].contains(&StringOrBase64::from(data[0].payload()).as_str())); + assert!(["2", "3"].contains(&StringOrBase64::from(data[1].payload()).as_str())); put_data( &session, @@ -153,10 +153,10 @@ async fn test_wild_card_in_order() { // expected two entries let data = get_data(&session, "wild/test/*").await; assert_eq!(data.len(), 2); - assert!(["wild/test/a", "wild/test/b"].contains(&data[0].key_expr.as_str())); - assert!(["wild/test/a", "wild/test/b"].contains(&data[1].key_expr.as_str())); - assert_eq!(StringOrBase64::from(data[0].payload.clone()).as_str(), "4"); - assert_eq!(StringOrBase64::from(data[1].payload.clone()).as_str(), "4"); + assert!(["wild/test/a", "wild/test/b"].contains(&data[0].key_expr().as_str())); + assert!(["wild/test/a", "wild/test/b"].contains(&data[1].key_expr().as_str())); + assert_eq!(StringOrBase64::from(data[0].payload()).as_str(), "4"); + assert_eq!(StringOrBase64::from(data[1].payload()).as_str(), "4"); delete_data( &session, diff --git a/zenoh-ext/examples/z_query_sub.rs b/zenoh-ext/examples/z_query_sub.rs index 80efc0854f..8c1307d712 100644 --- a/zenoh-ext/examples/z_query_sub.rs +++ b/zenoh-ext/examples/z_query_sub.rs @@ 
-60,8 +60,8 @@ async fn main() { select!( sample = subscriber.recv_async() => { let sample = sample.unwrap(); - let payload = sample.payload.deserialize::().unwrap_or_else(|e| format!("{}", e)); - println!(">> [Subscriber] Received {} ('{}': '{}')", sample.kind, sample.key_expr.as_str(), payload); + let payload = sample.payload().deserialize::().unwrap_or_else(|e| format!("{}", e)); + println!(">> [Subscriber] Received {} ('{}': '{}')", sample.kind(), sample.key_expr().as_str(), payload); }, _ = stdin.read_exact(&mut input).fuse() => { diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 75a435e8f4..41007d8b87 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -248,7 +248,7 @@ async fn net_event_handler(z: Arc, state: Arc) { .await .unwrap(); while let Ok(s) = sub.recv_async().await { - match bincode::deserialize::(&(s.payload.contiguous())) { + match bincode::deserialize::(&(s.payload().contiguous())) { Ok(evt) => match evt { GroupNetEvent::Join(je) => { log::debug!("Member join: {:?}", &je.member); @@ -308,7 +308,7 @@ async fn net_event_handler(z: Arc, state: Arc) { match reply.sample { Ok(sample) => { match bincode::deserialize::( - &sample.payload.contiguous(), + &sample.payload().contiguous(), ) { Ok(m) => { let mut expiry = Instant::now(); diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 1c9a286800..85cb96cce2 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -180,9 +180,9 @@ impl<'a> PublicationCache<'a> { sample = sub_recv.recv_async() => { if let Ok(sample) = sample { let queryable_key_expr: KeyExpr<'_> = if let Some(prefix) = &queryable_prefix { - prefix.join(&sample.key_expr).unwrap().into() + prefix.join(sample.key_expr()).unwrap().into() } else { - sample.key_expr.clone() + sample.key_expr().clone() }; if let Some(queue) = cache.get_mut(queryable_key_expr.as_keyexpr()) { @@ -207,7 +207,7 @@ impl<'a> PublicationCache<'a> { if !query.selector().key_expr.as_str().contains('*') { if let Some(queue) = cache.get(query.selector().key_expr.as_keyexpr()) { for sample in queue { - if let (Ok(Some(time_range)), Some(timestamp)) = (query.selector().time_range(), sample.timestamp) { + if let (Ok(Some(time_range)), Some(timestamp)) = (query.selector().time_range(), sample.timestamp()) { if !time_range.contains(timestamp.get_time().to_system_time()){ continue; } @@ -221,7 +221,7 @@ impl<'a> PublicationCache<'a> { for (key_expr, queue) in cache.iter() { if query.selector().key_expr.intersects(unsafe{ keyexpr::from_str_unchecked(key_expr) }) { for sample in queue { - if let (Ok(Some(time_range)), Some(timestamp)) = (query.selector().time_range(), sample.timestamp) { + if let (Ok(Some(time_range)), Some(timestamp)) = (query.selector().time_range(), sample.timestamp()) { if !time_range.contains(timestamp.get_time().to_system_time()){ continue; } diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 2c89ec82ae..470f795f2b 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -304,8 +304,8 @@ impl MergeQueue { } fn push(&mut self, sample: Sample) { - if let Some(ts) = sample.timestamp { - self.timstamped.entry(ts).or_insert(sample); + if let Some(ts) = sample.timestamp() { + self.timstamped.entry(ts.clone()).or_insert(sample); } else { self.untimestamped.push_back(sample); } diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index 9f14866363..d4229db4cc 100644 --- a/zenoh/src/liveliness.rs 
+++ b/zenoh/src/liveliness.rs @@ -131,9 +131,9 @@ impl<'a> Liveliness<'a> { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session.liveliness().declare_subscriber("key/expression").res().await.unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { - /// match sample.kind { - /// SampleKind::Put => println!("New liveliness: {}", sample.key_expr), - /// SampleKind::Delete => println!("Lost liveliness: {}", sample.key_expr), + /// match sample.kind() { + /// SampleKind::Put => println!("New liveliness: {}", sample.key_expr()), + /// SampleKind::Delete => println!("Lost liveliness: {}", sample.key_expr()), /// } /// } /// # }) @@ -169,7 +169,7 @@ impl<'a> Liveliness<'a> { /// let replies = session.liveliness().get("key/expression").res().await.unwrap(); /// while let Ok(reply) = replies.recv_async().await { /// if let Ok(sample) = reply.sample { - /// println!(">> Liveliness token {}", sample.key_expr); + /// println!(">> Liveliness token {}", sample.key_expr()); /// } /// } /// # }) @@ -425,7 +425,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") - /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr, sample.payload); }) + /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()); }) /// .res() /// .await /// .unwrap(); @@ -499,7 +499,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { - /// println!("Received: {} {:?}", sample.key_expr, sample.payload); + /// println!("Received: {} {:?}", sample.key_expr(), sample.payload()); /// } /// # }) /// ``` @@ -593,7 +593,7 @@ where /// .unwrap(); /// while let Ok(token) = tokens.recv_async().await { /// match token.sample { -/// Ok(sample) => println!("Alive token ('{}')", sample.key_expr.as_str()), +/// Ok(sample) => println!("Alive token ('{}')", sample.key_expr().as_str()), /// Err(err) => println!("Received (ERROR: '{:?}')", err.payload), /// } /// } diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index f499db50da..62f40f9294 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -579,8 +579,8 @@ impl std::fmt::Display for StringOrBase64 { } } -impl From for StringOrBase64 { - fn from(v: Payload) -> Self { +impl From<&Payload> for StringOrBase64 { + fn from(v: &Payload) -> Self { use base64::{engine::general_purpose::STANDARD as b64_std_engine, Engine}; match v.deserialize::() { Ok(s) => StringOrBase64::String(s), diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 9c68b460d9..1ac04313ab 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -363,38 +363,18 @@ pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; #[non_exhaustive] #[derive(Clone, Debug)] pub struct Sample { - /// The key expression on which this Sample was published. - pub key_expr: KeyExpr<'static>, - /// The payload of this Sample. - pub payload: Payload, - /// The kind of this Sample. - pub kind: SampleKind, - /// The encoding of this sample - pub encoding: Encoding, - /// The [`Timestamp`] of this Sample. - pub timestamp: Option, - /// Quality of service settings this sample was sent with. 
- pub qos: QoS, + pub(crate) key_expr: KeyExpr<'static>, + pub(crate) payload: Payload, + pub(crate) kind: SampleKind, + pub(crate) encoding: Encoding, + pub(crate) timestamp: Option, + pub(crate) qos: QoS, #[cfg(feature = "unstable")] - ///
- /// 🔬 - /// This API has been marked as unstable: it works as advertised, but we may change it in a future release. - /// To use it, you must enable zenoh's unstable feature flag. - ///
- /// - /// Infos on the source of this Sample. - pub source_info: SourceInfo, + pub(crate) source_info: SourceInfo, #[cfg(feature = "unstable")] - ///
- /// 🔬 - /// This API has been marked as unstable: it works as advertised, but we may change it in a future release. - /// To use it, you must enable zenoh's unstable feature flag. - ///
- /// - /// A map of key-value pairs, where each key and value are byte-slices. - pub attachment: Option, + pub(crate) attachment: Option, } impl Sample { @@ -471,19 +451,67 @@ impl Sample { self } + /// Gets the key expression on which this Sample was published. + #[inline] + pub fn key_expr(&self) -> &KeyExpr<'static> { + &self.key_expr + } + + /// Gets the payload of this Sample. + #[inline] + pub fn payload(&self) -> &Payload { + &self.payload + } + + /// Gets the kind of this Sample. + #[inline] + pub fn kind(&self) -> SampleKind { + self.kind + } + + /// Sets the kind of this Sample. + #[inline] + #[doc(hidden)] + #[zenoh_macros::unstable] + pub fn with_kind(mut self, kind: SampleKind) -> Self { + self.kind = kind; + self + } + + /// Gets the encoding of this sample + #[inline] + pub fn encoding(&self) -> &Encoding { + &self.encoding + } + /// Gets the timestamp of this Sample. #[inline] - pub fn get_timestamp(&self) -> Option<&Timestamp> { + pub fn timestamp(&self) -> Option<&Timestamp> { self.timestamp.as_ref() } /// Sets the timestamp of this Sample. #[inline] + #[doc(hidden)] + #[zenoh_macros::unstable] pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self { self.timestamp = Some(timestamp); self } + /// Gets the quality of service settings this Sample was sent with. + #[inline] + pub fn qos(&self) -> &QoS { + &self.qos + } + + /// Gets infos on the source of this Sample. + #[zenoh_macros::unstable] + #[inline] + pub fn source_info(&self) -> &SourceInfo { + &self.source_info + } + /// Sets the source info of this Sample. #[zenoh_macros::unstable] #[inline] @@ -506,17 +534,22 @@ impl Sample { } } + /// Gets the sample attachment: a map of key-value pairs, where each key and value are byte-slices. #[zenoh_macros::unstable] + #[inline] pub fn attachment(&self) -> Option<&Attachment> { self.attachment.as_ref() } + /// Gets the mutable sample attachment: a map of key-value pairs, where each key and value are byte-slices. 
#[zenoh_macros::unstable] + #[inline] pub fn attachment_mut(&mut self) -> &mut Option { &mut self.attachment } - #[allow(clippy::result_large_err)] + #[inline] + #[doc(hidden)] #[zenoh_macros::unstable] pub fn with_attachment(mut self, attachment: Attachment) -> Self { self.attachment = Some(attachment); diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index e276d0c6d0..d4c3257472 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -67,7 +67,7 @@ impl fmt::Debug for SubscriberState { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") -/// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr, sample.payload) }) +/// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()) }) /// .res() /// .await /// .unwrap(); @@ -100,7 +100,7 @@ pub(crate) struct SubscriberInner<'a> { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") -/// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr, sample.payload); }) +/// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()); }) /// .pull_mode() /// .res() /// .await @@ -123,7 +123,7 @@ impl<'a> PullSubscriberInner<'a> { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") - /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr, sample.payload); }) + /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()); }) /// .pull_mode() /// .res() /// .await @@ -332,7 +332,7 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") - /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr, sample.payload); }) + /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()); }) /// .res() /// .await /// .unwrap(); @@ -407,7 +407,7 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { - /// println!("Received: {} {:?}", sample.key_expr, sample.payload); + /// println!("Received: {} {:?}", sample.key_expr(), sample.payload()); /// } /// # }) /// ``` @@ -636,7 +636,7 @@ where /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { -/// println!("Received: {} {:?}", sample.key_expr, sample.payload); +/// println!("Received: {} {:?}", sample.key_expr(), sample.payload()); /// } /// # }) /// ``` diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 0e7c1c0de7..38d03b0a84 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -9,9 +9,9 @@ fn pubsub() { .callback(|sample| { println!( "{}", - std::str::from_utf8(&sample.payload.contiguous()).unwrap() + std::str::from_utf8(&sample.payload().contiguous()).unwrap() ); - for (k, v) in &sample.attachment.unwrap() { + for (k, v) in sample.attachment().unwrap() { assert!(k.iter().rev().zip(v.as_slice()).all(|(k, v)| k == v)) } }) diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index 0ea775784a..5823b16150 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -69,15 +69,15 @@ fn zenoh_events() { let sample = 
ztimeout!(sub1.recv_async()); assert!(sample.is_ok()); - let key_expr = sample.as_ref().unwrap().key_expr.as_str(); + let key_expr = sample.as_ref().unwrap().key_expr().as_str(); assert!(key_expr.eq(&format!("@/session/{zid}/transport/unicast/{zid2}"))); - assert!(sample.as_ref().unwrap().kind == SampleKind::Put); + assert!(sample.as_ref().unwrap().kind() == SampleKind::Put); let sample = ztimeout!(sub2.recv_async()); assert!(sample.is_ok()); - let key_expr = sample.as_ref().unwrap().key_expr.as_str(); + let key_expr = sample.as_ref().unwrap().key_expr().as_str(); assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/"))); - assert!(sample.as_ref().unwrap().kind == SampleKind::Put); + assert!(sample.as_ref().unwrap().kind() == SampleKind::Put); let replies: Vec = ztimeout!(session .get(format!("@/session/{zid}/transport/unicast/*")) @@ -87,7 +87,7 @@ fn zenoh_events() { .collect(); assert!(replies.len() == 1); assert!(replies[0].sample.is_ok()); - let key_expr = replies[0].sample.as_ref().unwrap().key_expr.as_str(); + let key_expr = replies[0].sample.as_ref().unwrap().key_expr().as_str(); assert!(key_expr.eq(&format!("@/session/{zid}/transport/unicast/{zid2}"))); let replies: Vec = ztimeout!(session @@ -98,22 +98,22 @@ fn zenoh_events() { .collect(); assert!(replies.len() == 1); assert!(replies[0].sample.is_ok()); - let key_expr = replies[0].sample.as_ref().unwrap().key_expr.as_str(); + let key_expr = replies[0].sample.as_ref().unwrap().key_expr().as_str(); assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/"))); close_session(session2).await; let sample = ztimeout!(sub1.recv_async()); assert!(sample.is_ok()); - let key_expr = sample.as_ref().unwrap().key_expr.as_str(); + let key_expr = sample.as_ref().unwrap().key_expr().as_str(); assert!(key_expr.eq(&format!("@/session/{zid}/transport/unicast/{zid2}"))); - assert!(sample.as_ref().unwrap().kind == SampleKind::Delete); + assert!(sample.as_ref().unwrap().kind() == SampleKind::Delete); let sample = ztimeout!(sub2.recv_async()); assert!(sample.is_ok()); - let key_expr = sample.as_ref().unwrap().key_expr.as_str(); + let key_expr = sample.as_ref().unwrap().key_expr().as_str(); assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/"))); - assert!(sample.as_ref().unwrap().kind == SampleKind::Delete); + assert!(sample.as_ref().unwrap().kind() == SampleKind::Delete); sub2.undeclare().res().await.unwrap(); sub1.undeclare().res().await.unwrap(); diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index 2a5c30e7b8..1f502138e4 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -83,9 +83,9 @@ fn downsampling_by_keyexpr_impl(egress: bool) { .callback(move |sample| { let mut count = zlock!(total_count_clone); *count += 1; - if sample.key_expr.as_str() == "test/downsamples_by_keyexp/r100" { + if sample.key_expr().as_str() == "test/downsamples_by_keyexp/r100" { zlock!(counter_r100).tick(); - } else if sample.key_expr.as_str() == "test/downsamples_by_keyexp/r50" { + } else if sample.key_expr().as_str() == "test/downsamples_by_keyexp/r50" { zlock!(counter_r50).tick(); } }) @@ -191,7 +191,7 @@ fn downsampling_by_interface_impl(egress: bool) { .callback(move |sample| { let mut count = zlock!(total_count_clone); *count += 1; - if sample.key_expr.as_str() == "test/downsamples_by_interface/r100" { + if sample.key_expr().as_str() == "test/downsamples_by_interface/r100" { zlock!(counter_r100).tick(); } }) diff --git 
a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index 96cca533df..c55eed4bc4 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -72,14 +72,14 @@ fn zenoh_liveliness() { .res_async()) .unwrap(); let sample = ztimeout!(replies.recv_async()).unwrap().sample.unwrap(); - assert!(sample.kind == SampleKind::Put); - assert!(sample.key_expr.as_str() == "zenoh_liveliness_test"); + assert!(sample.kind() == SampleKind::Put); + assert!(sample.key_expr().as_str() == "zenoh_liveliness_test"); assert!(ztimeout!(replies.recv_async()).is_err()); let sample = ztimeout!(sub.recv_async()).unwrap(); - assert!(sample.kind == SampleKind::Put); - assert!(sample.key_expr.as_str() == "zenoh_liveliness_test"); + assert!(sample.kind() == SampleKind::Put); + assert!(sample.key_expr().as_str() == "zenoh_liveliness_test"); drop(token); diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 475d8d7a1b..24119e7b1e 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -52,13 +52,13 @@ fn pubsub() { task::sleep(SLEEP).await; ztimeout!(publisher1.put("qos").res_async()).unwrap(); - let qos = ztimeout!(subscriber.recv_async()).unwrap().qos; + let qos = ztimeout!(subscriber.recv_async()).unwrap().qos().clone(); assert_eq!(qos.priority(), Priority::DataHigh); assert_eq!(qos.congestion_control(), CongestionControl::Drop); ztimeout!(publisher2.put("qos").res_async()).unwrap(); - let qos = ztimeout!(subscriber.recv_async()).unwrap().qos; + let qos = ztimeout!(subscriber.recv_async()).unwrap().qos().clone(); assert_eq!(qos.priority(), Priority::DataLow); assert_eq!(qos.congestion_control(), CongestionControl::Block); diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 82053b4f1d..06a8f5da45 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -58,7 +58,7 @@ impl Task { let sub = ztimeout!(session.declare_subscriber(ke).res_async())?; let mut counter = 0; while let Ok(sample) = sub.recv_async().await { - let recv_size = sample.payload.len(); + let recv_size = sample.payload().len(); if recv_size != *expected_size { bail!("Received payload size {recv_size} mismatches the expected {expected_size}"); } @@ -91,7 +91,7 @@ impl Task { while let Ok(reply) = replies.recv_async().await { match reply.sample { Ok(sample) => { - let recv_size = sample.payload.len(); + let recv_size = sample.payload().len(); if recv_size != *expected_size { bail!("Received payload size {recv_size} mismatches the expected {expected_size}"); } diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 077c58298d..e3f5e2df63 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -95,7 +95,7 @@ async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Re let sub = ztimeout!(peer01 .declare_subscriber(key_expr) .callback(move |sample| { - assert_eq!(sample.payload.len(), size); + assert_eq!(sample.payload().len(), size); c_msgs.fetch_add(1, Ordering::Relaxed); }) .res_async()) @@ -198,8 +198,8 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { let s = s.sample.unwrap(); - assert_eq!(s.kind, SampleKind::Put); - assert_eq!(s.payload.len(), size); + assert_eq!(s.kind(), SampleKind::Put); + assert_eq!(s.payload().len(), size); cnt += 1; } } @@ -216,8 +216,8 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); while let Ok(s) = 
ztimeout!(rs.recv_async()) { let s = s.sample.unwrap(); - assert_eq!(s.kind, SampleKind::Delete); - assert_eq!(s.payload.len(), 0); + assert_eq!(s.kind(), SampleKind::Delete); + assert_eq!(s.payload().len(), 0); cnt += 1; } } diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index def0dffe33..8eb007b0c0 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -114,7 +114,7 @@ async fn test_unicity_pubsub(s01: &Session, s02: &Session, s03: &Session) { let sub1 = ztimeout!(s01 .declare_subscriber(key_expr) .callback(move |sample| { - assert_eq!(sample.payload.len(), size); + assert_eq!(sample.payload().len(), size); c_msgs1.fetch_add(1, Ordering::Relaxed); }) .res_async()) @@ -126,7 +126,7 @@ async fn test_unicity_pubsub(s01: &Session, s02: &Session, s03: &Session) { let sub2 = ztimeout!(s02 .declare_subscriber(key_expr) .callback(move |sample| { - assert_eq!(sample.payload.len(), size); + assert_eq!(sample.payload().len(), size); c_msgs2.fetch_add(1, Ordering::Relaxed); }) .res_async()) @@ -232,7 +232,7 @@ async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) { for _ in 0..msg_count { let rs = ztimeout!(s03.get(key_expr).res_async()).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { - assert_eq!(s.sample.unwrap().payload.len(), size); + assert_eq!(s.sample.unwrap().payload().len(), size); cnt += 1; } } From cc68ffb8f0f3d8b429ffcdab6230d1a5cbb79a8a Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 13 Mar 2024 15:10:45 +0100 Subject: [PATCH 013/357] remove Publisher::write (#819) * build plugins with default zenoh features * update documentation to the new api for keformat's generated Parsed (#783) * fix: Relax dependency requirements (#758) - async-io - unix-named-pipe - filepath - advisory-lock * feat: Improve release workflow (#756) * wip: Improve Release workflow * feat: Add DockerHub & GHCR releases * feat: Refactor checks and tests into pre-release workflow * chore: Remove crates_check.sh and crates_publish.sh * fix: Remove Dockerfile * restore SN in case of frame drops caused by congestion control (#815) * remove Publisher::write * test fix * remove unrelated changes added by rebasing --------- Co-authored-by: Pierre Avital Co-authored-by: Mahmoud Mazouz Co-authored-by: Dmitrii Bannov <104833606+yellowhatter@users.noreply.github.com> --- zenoh/src/publication.rs | 29 ++++++++--------------------- 1 file changed, 8 insertions(+), 21 deletions(-) diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 2a1a58ebd9..f12842d081 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -350,25 +350,6 @@ impl<'a> Publisher<'a> { } } - /// Send data with [`kind`](SampleKind) (Put or Delete). - /// - /// # Examples - /// ``` - /// # async_std::task::block_on(async { - /// use zenoh::prelude::r#async::*; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); - /// publisher.write(SampleKind::Put, "payload").res().await.unwrap(); - /// # }) - /// ``` - pub fn write(&self, kind: SampleKind, value: IntoPayload) -> Publication - where - IntoPayload: Into, - { - self._write(kind, value.into()) - } - /// Put data. 
/// /// # Examples @@ -1451,11 +1432,17 @@ mod tests { let session = open(Config::default()).res().unwrap(); let sub = session.declare_subscriber(KEY_EXPR).res().unwrap(); let pub_ = session.declare_publisher(KEY_EXPR).res().unwrap(); - pub_.write(kind, VALUE).res().unwrap(); + + match kind { + SampleKind::Put => pub_.put(VALUE).res().unwrap(), + SampleKind::Delete => pub_.delete().res().unwrap(), + } let sample = sub.recv().unwrap(); assert_eq!(sample.kind, kind); - assert_eq!(sample.payload.deserialize::().unwrap(), VALUE); + if let SampleKind::Put = kind { + assert_eq!(sample.payload.deserialize::().unwrap(), VALUE); + } } sample_kind_integrity_in_publication_with(SampleKind::Put); From f3af52ac0f1787d3eff29ef82c5f00e695c249e2 Mon Sep 17 00:00:00 2001 From: Denis Biryukov Date: Wed, 13 Mar 2024 15:17:37 +0100 Subject: [PATCH 014/357] format and clippy --- examples/examples/z_get_liveliness.rs | 2 +- examples/examples/z_sub_liveliness.rs | 6 ++-- .../src/replica/align_queryable.rs | 5 +--- .../src/replica/aligner.rs | 5 +--- .../src/replica/storage.rs | 29 ++++++++----------- zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh/tests/qos.rs | 4 +-- 7 files changed, 21 insertions(+), 32 deletions(-) diff --git a/examples/examples/z_get_liveliness.rs b/examples/examples/z_get_liveliness.rs index 036dc0ab98..66de570356 100644 --- a/examples/examples/z_get_liveliness.rs +++ b/examples/examples/z_get_liveliness.rs @@ -37,7 +37,7 @@ async fn main() { .unwrap(); while let Ok(reply) = replies.recv_async().await { match reply.sample { - Ok(sample) => println!(">> Alive token ('{}')", sample.key_expr.as_str(),), + Ok(sample) => println!(">> Alive token ('{}')", sample.key_expr().as_str(),), Err(err) => { let payload = err .payload diff --git a/examples/examples/z_sub_liveliness.rs b/examples/examples/z_sub_liveliness.rs index 52ba53875c..02e2e71ba4 100644 --- a/examples/examples/z_sub_liveliness.rs +++ b/examples/examples/z_sub_liveliness.rs @@ -46,13 +46,13 @@ async fn main() { select!( sample = subscriber.recv_async() => { let sample = sample.unwrap(); - match sample.kind { + match sample.kind() { SampleKind::Put => println!( ">> [LivelinessSubscriber] New alive token ('{}')", - sample.key_expr.as_str()), + sample.key_expr().as_str()), SampleKind::Delete => println!( ">> [LivelinessSubscriber] Dropped token ('{}')", - sample.key_expr.as_str()), + sample.key_expr().as_str()), } }, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index fc361d77f2..32be4a5534 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -180,10 +180,7 @@ impl AlignQueryable { let entry = entry.unwrap(); result.push(AlignData::Data( OwnedKeyExpr::from(entry.key_expr().clone()), - ( - Value::from(entry), - each.timestamp, - ), + (Value::from(entry), each.timestamp), )); } } diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index b11a94e4f2..fb46b78082 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -142,10 +142,7 @@ impl Aligner { for sample in replies { result.insert( sample.key_expr().clone().into(), - ( - sample.timestamp().unwrap().clone(), - Value::from(sample), - ), + (*sample.timestamp().unwrap(), Value::from(sample)), ); } (result, no_err) 
diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 895f2e1914..0708dcabd9 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -276,7 +276,7 @@ impl StorageService { } let matching_keys = if sample.key_expr().is_wild() { - self.get_matching_keys(&sample.key_expr()).await + self.get_matching_keys(sample.key_expr()).await } else { vec![sample.key_expr().clone().into()] }; @@ -309,20 +309,15 @@ impl StorageService { let Value { payload, encoding, .. } = overriding_update.data.value; - let sample_to_store = Sample::new(KeyExpr::from(k.clone()), payload) + Sample::new(KeyExpr::from(k.clone()), payload) .with_encoding(encoding) .with_timestamp(overriding_update.data.timestamp) - .with_kind(overriding_update.kind); - sample_to_store - } - None => { - let sample_to_store = - Sample::new(KeyExpr::from(k.clone()), sample.payload().clone()) - .with_encoding(sample.encoding().clone()) - .with_timestamp(sample.timestamp().unwrap().clone()) - .with_kind(sample.kind()); - sample_to_store + .with_kind(overriding_update.kind) } + None => Sample::new(KeyExpr::from(k.clone()), sample.payload().clone()) + .with_encoding(sample.encoding().clone()) + .with_timestamp(*sample.timestamp().unwrap()) + .with_kind(sample.kind()), }; let stripped_key = match self.strip_prefix(sample_to_store.key_expr()) { @@ -340,16 +335,16 @@ impl StorageService { stripped_key, Value::new(sample_to_store.payload().clone()) .with_encoding(sample_to_store.encoding().clone()), - sample_to_store.timestamp().unwrap().clone(), + *sample_to_store.timestamp().unwrap(), ) .await } SampleKind::Delete => { // register a tombstone - self.mark_tombstone(&k, sample_to_store.timestamp().unwrap().clone()) + self.mark_tombstone(&k, *sample_to_store.timestamp().unwrap()) .await; storage - .delete(stripped_key, sample_to_store.timestamp().unwrap().clone()) + .delete(stripped_key, *sample_to_store.timestamp().unwrap()) .await } }; @@ -363,7 +358,7 @@ impl StorageService { .as_ref() .unwrap() .log_propagation - .send((k.clone(), sample_to_store.timestamp().unwrap().clone())); + .send((k.clone(), *sample_to_store.timestamp().unwrap())); match sending { Ok(_) => (), Err(e) => { @@ -398,7 +393,7 @@ impl StorageService { // @TODO: change into a better store that does incremental writes let key = sample.key_expr().clone(); let mut wildcards = self.wildcard_updates.write().await; - let timestamp = sample.timestamp().unwrap().clone(); + let timestamp = *sample.timestamp().unwrap(); wildcards.insert( &key, Update { diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 470f795f2b..480e490fdd 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -305,7 +305,7 @@ impl MergeQueue { fn push(&mut self, sample: Sample) { if let Some(ts) = sample.timestamp() { - self.timstamped.entry(ts.clone()).or_insert(sample); + self.timstamped.entry(*ts).or_insert(sample); } else { self.untimestamped.push_back(sample); } diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 24119e7b1e..1a9df306b2 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -52,13 +52,13 @@ fn pubsub() { task::sleep(SLEEP).await; ztimeout!(publisher1.put("qos").res_async()).unwrap(); - let qos = ztimeout!(subscriber.recv_async()).unwrap().qos().clone(); + let qos = *ztimeout!(subscriber.recv_async()).unwrap().qos(); 
assert_eq!(qos.priority(), Priority::DataHigh); assert_eq!(qos.congestion_control(), CongestionControl::Drop); ztimeout!(publisher2.put("qos").res_async()).unwrap(); - let qos = ztimeout!(subscriber.recv_async()).unwrap().qos().clone(); + let qos = *ztimeout!(subscriber.recv_async()).unwrap().qos(); assert_eq!(qos.priority(), Priority::DataLow); assert_eq!(qos.congestion_control(), CongestionControl::Block); From 0ca41e817044e80a6c422122f46aa3e60821ce64 Mon Sep 17 00:00:00 2001 From: Denis Biryukov Date: Wed, 13 Mar 2024 15:26:19 +0100 Subject: [PATCH 015/357] mark remaining sample-mutating methods as unstable and hidden --- zenoh/src/sample.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 1ac04313ab..9b9c55822e 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -520,10 +520,12 @@ impl Sample { self } - #[inline] /// Ensure that an associated Timestamp is present in this Sample. /// If not, a new one is created with the current system time and 0x00 as id. /// Get the timestamp of this sample (either existing one or newly created) + #[inline] + #[doc(hidden)] + #[zenoh_macros::unstable] pub fn ensure_timestamp(&mut self) -> &Timestamp { if let Some(ref timestamp) = self.timestamp { timestamp @@ -542,8 +544,9 @@ impl Sample { } /// Gets the mutable sample attachment: a map of key-value pairs, where each key and value are byte-slices. - #[zenoh_macros::unstable] #[inline] + #[doc(hidden)] + #[zenoh_macros::unstable] pub fn attachment_mut(&mut self) -> &mut Option { &mut self.attachment } From ea7179f789dba510c0e2070188a374768850c76e Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 14 Mar 2024 12:21:03 +0100 Subject: [PATCH 016/357] Add express support in the pipeline --- commons/zenoh-protocol/src/network/mod.rs | 16 +++++++++++++-- io/zenoh-transport/src/common/pipeline.rs | 24 ++++++++++++++--------- 2 files changed, 29 insertions(+), 11 deletions(-) diff --git a/commons/zenoh-protocol/src/network/mod.rs b/commons/zenoh-protocol/src/network/mod.rs index 6af7fef243..0e198ddf0f 100644 --- a/commons/zenoh-protocol/src/network/mod.rs +++ b/commons/zenoh-protocol/src/network/mod.rs @@ -110,6 +110,18 @@ impl NetworkMessage { true } + #[inline] + pub fn is_express(&self) -> bool { + match &self.body { + NetworkBody::Push(msg) => msg.ext_qos.is_express(), + NetworkBody::Request(msg) => msg.ext_qos.is_express(), + NetworkBody::Response(msg) => msg.ext_qos.is_express(), + NetworkBody::ResponseFinal(msg) => msg.ext_qos.is_express(), + NetworkBody::Declare(msg) => msg.ext_qos.is_express(), + NetworkBody::OAM(msg) => msg.ext_qos.is_express(), + } + } + #[inline] pub fn is_droppable(&self) -> bool { if !self.is_reliable() { @@ -117,11 +129,11 @@ impl NetworkMessage { } let cc = match &self.body { - NetworkBody::Declare(msg) => msg.ext_qos.get_congestion_control(), NetworkBody::Push(msg) => msg.ext_qos.get_congestion_control(), NetworkBody::Request(msg) => msg.ext_qos.get_congestion_control(), NetworkBody::Response(msg) => msg.ext_qos.get_congestion_control(), NetworkBody::ResponseFinal(msg) => msg.ext_qos.get_congestion_control(), + NetworkBody::Declare(msg) => msg.ext_qos.get_congestion_control(), NetworkBody::OAM(msg) => msg.ext_qos.get_congestion_control(), }; @@ -131,11 +143,11 @@ impl NetworkMessage { #[inline] pub fn priority(&self) -> Priority { match &self.body { - NetworkBody::Declare(msg) => msg.ext_qos.get_priority(), NetworkBody::Push(msg) => msg.ext_qos.get_priority(), NetworkBody::Request(msg) => 
msg.ext_qos.get_priority(), NetworkBody::Response(msg) => msg.ext_qos.get_priority(), NetworkBody::ResponseFinal(msg) => msg.ext_qos.get_priority(), + NetworkBody::Declare(msg) => msg.ext_qos.get_priority(), NetworkBody::OAM(msg) => msg.ext_qos.get_priority(), } } diff --git a/io/zenoh-transport/src/common/pipeline.rs b/io/zenoh-transport/src/common/pipeline.rs index 3968eabdf5..516834fa41 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ -161,12 +161,18 @@ impl StageIn { } macro_rules! zretok { - ($batch:expr) => {{ - let bytes = $batch.len(); - *c_guard = Some($batch); - drop(c_guard); - self.s_out.notify(bytes); - return true; + ($batch:expr, $msg:expr) => {{ + if $msg.is_express() { + // Move out existing batch + self.s_out.move_batch($batch); + return true; + } else { + let bytes = $batch.len(); + *c_guard = Some($batch); + drop(c_guard); + self.s_out.notify(bytes); + return true; + } }}; } @@ -174,7 +180,7 @@ impl StageIn { let mut batch = zgetbatch_rets!(false); // Attempt the serialization on the current batch let e = match batch.encode(&*msg) { - Ok(_) => zretok!(batch), + Ok(_) => zretok!(batch, msg), Err(e) => e, }; @@ -194,7 +200,7 @@ impl StageIn { if let BatchError::NewFrame = e { // Attempt a serialization with a new frame if batch.encode((&*msg, &frame)).is_ok() { - zretok!(batch); + zretok!(batch, msg); } } @@ -206,7 +212,7 @@ impl StageIn { // Attempt a second serialization on fully empty batch if batch.encode((&*msg, &frame)).is_ok() { - zretok!(batch); + zretok!(batch, msg); } // The second serialization attempt has failed. This means that the message is From 62bf7d3c12d1e4bf56375a6af7a6bd9ebdf8e81a Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 14 Mar 2024 12:34:42 +0100 Subject: [PATCH 017/357] Add express support to publisher and put --- zenoh/src/publication.rs | 24 ++++++++++++++++++++++-- zenoh/src/session.rs | 2 ++ 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index f12842d081..75d4ddc2b7 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -102,6 +102,13 @@ impl PutBuilder<'_, '_> { self } + /// Change the `congestion_control` to apply when routing the data. + #[inline] + pub fn express(mut self, is_express: bool) -> Self { + self.publisher = self.publisher.express(is_express); + self + } + /// Restrict the matching subscribers that will receive the published data /// to the ones that have the given [`Locality`](crate::prelude::Locality). 
#[zenoh_macros::unstable] @@ -141,6 +148,7 @@ impl SyncResolve for PutBuilder<'_, '_> { key_expr, congestion_control, priority, + is_express, destination, } = self.publisher; @@ -151,6 +159,7 @@ impl SyncResolve for PutBuilder<'_, '_> { key_expr: key_expr?, congestion_control, priority, + is_express, destination, }; @@ -248,6 +257,7 @@ pub struct Publisher<'a> { pub(crate) key_expr: KeyExpr<'a>, pub(crate) congestion_control: CongestionControl, pub(crate) priority: Priority, + pub(crate) is_express: bool, pub(crate) destination: Locality, } @@ -738,6 +748,7 @@ pub struct PublisherBuilder<'a, 'b: 'a> { pub(crate) key_expr: ZResult>, pub(crate) congestion_control: CongestionControl, pub(crate) priority: Priority, + pub(crate) is_express: bool, pub(crate) destination: Locality, } @@ -751,6 +762,7 @@ impl<'a, 'b> Clone for PublisherBuilder<'a, 'b> { }, congestion_control: self.congestion_control, priority: self.priority, + is_express: self.is_express, destination: self.destination, } } @@ -771,6 +783,13 @@ impl<'a, 'b> PublisherBuilder<'a, 'b> { self } + /// Change the `congestion_control` to apply when routing the data. + #[inline] + pub fn express(mut self, is_express: bool) -> Self { + self.is_express = is_express; + self + } + /// Restrict the matching subscribers that will receive the published data /// to the ones that have the given [`Locality`](crate::prelude::Locality). #[zenoh_macros::unstable] @@ -830,6 +849,7 @@ impl<'a, 'b> SyncResolve for PublisherBuilder<'a, 'b> { key_expr, congestion_control: self.congestion_control, priority: self.priority, + is_express: self.is_express, destination: self.destination, }; log::trace!("publish({:?})", publisher.key_expr); @@ -867,7 +887,7 @@ fn resolve_put( ext_qos: ext::QoSType::new( publisher.priority.into(), publisher.congestion_control, - false, + publisher.is_express, ), ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -933,7 +953,7 @@ fn resolve_put( qos: QoS::from(ext::QoSType::new( publisher.priority.into(), publisher.congestion_control, - false, + publisher.is_express, )), }; diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 4c303ae974..5e706a0da8 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -337,6 +337,7 @@ impl<'s, 'a> SessionDeclarations<'s, 'a> for SessionRef<'a> { key_expr: key_expr.try_into().map_err(Into::into), congestion_control: CongestionControl::DEFAULT, priority: Priority::DEFAULT, + is_express: false, destination: Locality::default(), } } @@ -1909,6 +1910,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { key_expr: key_expr.try_into().map_err(Into::into), congestion_control: CongestionControl::DEFAULT, priority: Priority::DEFAULT, + is_express: false, destination: Locality::default(), } } From 4d8ec6ca2d2f326c02af5aa71a92e68200dd2ba0 Mon Sep 17 00:00:00 2001 From: Gabriele Baldoni Date: Thu, 14 Mar 2024 18:34:55 +0000 Subject: [PATCH 018/357] fix(828): ensuring valid JSON response from REST API (#831) * fix(828): ensuring valid JSON response from REST API Signed-off-by: gabrik * fix(828): improved JSON format conversion Signed-off-by: gabrik * chore: addressing comments Signed-off-by: gabrik * fix(828): added 'into_string' for StringOrBase64 Signed-off-by: gabrik * chore: address comments Signed-off-by: gabrik --------- Signed-off-by: gabrik --- plugins/zenoh-plugin-rest/src/lib.rs | 85 ++++++++++++++++++---------- zenoh/src/payload.rs | 8 +++ 2 files changed, 64 insertions(+), 29 deletions(-) diff --git a/plugins/zenoh-plugin-rest/src/lib.rs 
b/plugins/zenoh-plugin-rest/src/lib.rs index 1a99d7b5a4..39225b5d25 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -18,9 +18,10 @@ //! //! [Click here for Zenoh's documentation](../zenoh/index.html) use async_std::prelude::FutureExt; -use base64::{engine::general_purpose::STANDARD as b64_std_engine, Engine}; +use base64::Engine; use futures::StreamExt; use http_types::Method; +use serde::{Deserialize, Serialize}; use std::borrow::Cow; use std::convert::TryFrom; use std::str::FromStr; @@ -28,6 +29,7 @@ use std::sync::Arc; use tide::http::Mime; use tide::sse::Sender; use tide::{Request, Response, Server, StatusCode}; +use zenoh::payload::StringOrBase64; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, Reply}; @@ -46,36 +48,57 @@ lazy_static::lazy_static! { } const RAW_KEY: &str = "_raw"; -fn payload_to_json(payload: Payload) -> String { - payload - .deserialize::() - .unwrap_or_else(|_| format!(r#""{}""#, b64_std_engine.encode(payload.contiguous()))) +#[derive(Serialize, Deserialize)] +struct JSONSample { + key: String, + value: serde_json::Value, + encoding: String, + time: Option, } -fn sample_to_json(sample: Sample) -> String { - format!( - r#"{{ "key": "{}", "value": {}, "encoding": "{}", "time": "{}" }}"#, - sample.key_expr.as_str(), - payload_to_json(sample.payload), - sample.encoding, - if let Some(ts) = sample.timestamp { - ts.to_string() - } else { - "None".to_string() +pub fn base64_encode(data: &[u8]) -> String { + use base64::engine::general_purpose; + general_purpose::STANDARD.encode(data) +} + +fn payload_to_json(payload: Payload, encoding: &Encoding) -> serde_json::Value { + match payload.is_empty() { + // If the value is empty return a JSON null + true => serde_json::Value::Null, + // if it is not check the encoding + false => { + match encoding { + // If it is a JSON try to deserialize as json, if it fails fallback to base64 + &Encoding::APPLICATION_JSON | &Encoding::TEXT_JSON | &Encoding::TEXT_JSON5 => { + serde_json::from_slice::(&payload.contiguous()).unwrap_or( + serde_json::Value::String(StringOrBase64::from(payload).into_string()), + ) + } + // otherwise convert to JSON string + _ => serde_json::Value::String(StringOrBase64::from(payload).into_string()), + } } - ) + } } -fn result_to_json(sample: Result) -> String { +fn sample_to_json(sample: Sample) -> JSONSample { + JSONSample { + key: sample.key_expr.as_str().to_string(), + value: payload_to_json(sample.payload, &sample.encoding), + encoding: sample.encoding.to_string(), + time: sample.timestamp.map(|ts| ts.to_string()), + } +} + +fn result_to_json(sample: Result) -> JSONSample { match sample { Ok(sample) => sample_to_json(sample), - Err(err) => { - format!( - r#"{{ "key": "ERROR", "value": {}, "encoding": "{}"}}"#, - payload_to_json(err.payload), - err.encoding, - ) - } + Err(err) => JSONSample { + key: "ERROR".into(), + value: payload_to_json(err.payload, &err.encoding), + encoding: err.encoding.to_string(), + time: None, + }, } } @@ -83,10 +106,10 @@ async fn to_json(results: flume::Receiver) -> String { let values = results .stream() .filter_map(move |reply| async move { Some(result_to_json(reply.sample)) }) - .collect::>() - .await - .join(",\n"); - format!("[\n{values}\n]\n") + .collect::>() + .await; + + serde_json::to_string(&values).unwrap_or("[]".into()) } async fn to_json_response(results: flume::Receiver) -> Response { @@ -321,8 +344,12 @@ async fn query(mut req: Request<(Arc, 
String)>) -> tide::Result String { + match self { + StringOrBase64::String(s) | StringOrBase64::Base64(s) => s, + } + } +} + impl Deref for StringOrBase64 { type Target = String; From 622b230286ca37899f768b24b865e18669c2b0c1 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 15 Mar 2024 10:12:14 +0100 Subject: [PATCH 019/357] Add express support (#829) * Improve docs * Add express to examples * Fix doc in sample.rs --- examples/examples/z_ping.rs | 9 +++++++-- examples/examples/z_pong.rs | 10 +++++++--- examples/examples/z_pub_thr.rs | 4 ++++ zenoh/src/publication.rs | 8 ++++++-- zenoh/src/sample.rs | 2 +- 5 files changed, 25 insertions(+), 8 deletions(-) diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index cb6fecd81a..a57c937e48 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -22,7 +22,7 @@ fn main() { // initiate logging env_logger::init(); - let (config, warmup, size, n) = parse_args(); + let (config, warmup, size, n, express) = parse_args(); let session = zenoh::open(config).res().unwrap(); // The key expression to publish data on @@ -35,6 +35,7 @@ fn main() { let publisher = session .declare_publisher(key_expr_ping) .congestion_control(CongestionControl::Block) + .express(express) .res() .unwrap(); @@ -78,6 +79,9 @@ fn main() { #[derive(Parser)] struct Args { + /// express for sending data + #[arg(long, default_value = "false")] + no_express: bool, #[arg(short, long, default_value = "1")] /// The number of seconds to warm up (float) warmup: f64, @@ -90,12 +94,13 @@ struct Args { common: CommonArgs, } -fn parse_args() -> (Config, Duration, usize, usize) { +fn parse_args() -> (Config, Duration, usize, usize, bool) { let args = Args::parse(); ( args.common.into(), Duration::from_secs_f64(args.warmup), args.payload_size, args.samples, + !args.no_express, ) } diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index c2412b6d37..576ef232e5 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -21,7 +21,7 @@ fn main() { // initiate logging env_logger::init(); - let config = parse_args(); + let (config, express) = parse_args(); let session = zenoh::open(config).res().unwrap().into_arc(); @@ -34,6 +34,7 @@ fn main() { let publisher = session .declare_publisher(key_expr_pong) .congestion_control(CongestionControl::Block) + .express(express) .res() .unwrap(); @@ -47,11 +48,14 @@ fn main() { #[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] struct Args { + /// express for sending data + #[arg(long, default_value = "false")] + no_express: bool, #[command(flatten)] common: CommonArgs, } -fn parse_args() -> Config { +fn parse_args() -> (Config, bool) { let args = Args::parse(); - args.common.into() + (args.common.into(), !args.no_express) } diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index c042b2e7a2..4354ad2e68 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -41,6 +41,7 @@ fn main() { .declare_publisher("test/thr") .congestion_control(CongestionControl::Block) .priority(prio) + .express(args.express) .res() .unwrap(); @@ -65,6 +66,9 @@ fn main() { #[derive(Parser, Clone, PartialEq, Eq, Hash, Debug)] struct Args { + /// express for sending data + #[arg(long, default_value = "false")] + express: bool, /// Priority for sending data #[arg(short, long)] priority: Option, diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 75d4ddc2b7..1531cab606 100644 --- a/zenoh/src/publication.rs +++ 
b/zenoh/src/publication.rs @@ -102,7 +102,9 @@ impl PutBuilder<'_, '_> { self } - /// Change the `congestion_control` to apply when routing the data. + /// Change the `express` policy to apply when routing the data. + /// When express is set to `true`, then the message will not be batched. + /// This usually has a positive impact on latency but negative impact on throughput. #[inline] pub fn express(mut self, is_express: bool) -> Self { self.publisher = self.publisher.express(is_express); @@ -783,7 +785,9 @@ impl<'a, 'b> PublisherBuilder<'a, 'b> { self } - /// Change the `congestion_control` to apply when routing the data. + /// Change the `express` policy to apply when routing the data. + /// When express is set to `true`, then the message will not be batched. + /// This usually has a positive impact on latency but negative impact on throughput. #[inline] pub fn express(mut self, is_express: bool) -> Self { self.is_express = is_express; diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 9c68b460d9..36ebeeb129 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -556,7 +556,7 @@ impl QoS { self.inner.get_congestion_control() } - /// Gets express flag value. If true, the message is not batched during transmission, in order to reduce latency. + /// Gets express flag value. If `true`, the message is not batched during transmission, in order to reduce latency. pub fn express(&self) -> bool { self.inner.is_express() } From d73da7d70fef25d76bf94945792da0f0adffed0b Mon Sep 17 00:00:00 2001 From: Denis Biryukov Date: Fri, 15 Mar 2024 11:22:00 +0100 Subject: [PATCH 020/357] clippy --- plugins/zenoh-plugin-rest/src/lib.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index e0df8f286b..e2718f6579 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -344,7 +344,6 @@ async fn query(mut req: Request<(Arc, String)>) -> tide::Result Date: Mon, 18 Mar 2024 13:20:01 +0100 Subject: [PATCH 021/357] Rename IntoCallbackReceiver trait to IntoHandler trait (#816) --- zenoh-ext/src/querying_subscriber.rs | 36 ++++++++--------- zenoh/src/handlers.rs | 60 ++++++++++++++++------------ zenoh/src/liveliness.rs | 40 +++++++++---------- zenoh/src/prelude.rs | 4 +- zenoh/src/publication.rs | 22 +++++----- zenoh/src/query.rs | 20 +++++----- zenoh/src/queryable.rs | 22 +++++----- zenoh/src/scouting.rs | 22 +++++----- zenoh/src/subscriber.rs | 42 +++++++++---------- 9 files changed, 138 insertions(+), 130 deletions(-) diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 480e490fdd..8cb5480e58 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -105,7 +105,7 @@ impl<'a, 'b, KeySpace> QueryingSubscriberBuilder<'a, 'b, KeySpace, DefaultHandle handler: Handler, ) -> QueryingSubscriberBuilder<'a, 'b, KeySpace, Handler> where - Handler: zenoh::prelude::IntoCallbackReceiverPair<'static, Sample>, + Handler: zenoh::prelude::IntoHandler<'static, Sample>, { let QueryingSubscriberBuilder { session, @@ -214,17 +214,17 @@ impl<'a, 'b, KeySpace, Handler> QueryingSubscriberBuilder<'a, 'b, KeySpace, Hand impl<'a, KeySpace, Handler> Resolvable for QueryingSubscriberBuilder<'a, '_, KeySpace, Handler> where - Handler: IntoCallbackReceiverPair<'static, Sample>, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample>, + Handler::Handler: Send, { - type To = ZResult>; + type To = ZResult>; } impl SyncResolve for 
QueryingSubscriberBuilder<'_, '_, KeySpace, Handler> where KeySpace: Into + Clone, - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { fn res_sync(self) -> ::To { let session = self.session.clone(); @@ -272,8 +272,8 @@ where impl<'a, KeySpace, Handler> AsyncResolve for QueryingSubscriberBuilder<'a, '_, KeySpace, Handler> where KeySpace: Into + Clone, - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { type Future = Ready; @@ -462,7 +462,7 @@ where handler: Handler, ) -> FetchingSubscriberBuilder<'a, 'b, KeySpace, Handler, Fetch, TryIntoSample> where - Handler: zenoh::prelude::IntoCallbackReceiverPair<'static, Sample>, + Handler: zenoh::prelude::IntoHandler<'static, Sample>, { let FetchingSubscriberBuilder { session, @@ -536,11 +536,11 @@ impl< TryIntoSample, > Resolvable for FetchingSubscriberBuilder<'a, '_, KeySpace, Handler, Fetch, TryIntoSample> where - Handler: IntoCallbackReceiverPair<'static, Sample>, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample>, + Handler::Handler: Send, TryIntoSample: ExtractSample, { - type To = ZResult>; + type To = ZResult>; } impl< @@ -551,8 +551,8 @@ impl< > SyncResolve for FetchingSubscriberBuilder<'_, '_, KeySpace, Handler, Fetch, TryIntoSample> where KeySpace: Into, - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, TryIntoSample: ExtractSample + Send + Sync, { fn res_sync(self) -> ::To { @@ -569,8 +569,8 @@ impl< > AsyncResolve for FetchingSubscriberBuilder<'a, '_, KeySpace, Handler, Fetch, TryIntoSample> where KeySpace: Into, - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, TryIntoSample: ExtractSample + Send + Sync, { type Future = Ready; @@ -643,14 +643,14 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { ) -> ZResult where KeySpace: Into, - Handler: IntoCallbackReceiverPair<'static, Sample, Receiver = Receiver> + Send, + Handler: IntoHandler<'static, Sample, Handler = Receiver> + Send, TryIntoSample: ExtractSample + Send + Sync, { let state = Arc::new(Mutex::new(InnerState { pending_fetches: 0, merge_queue: MergeQueue::new(), })); - let (callback, receiver) = conf.handler.into_cb_receiver_pair(); + let (callback, receiver) = conf.handler.into_handler(); let sub_callback = { let state = state.clone(); diff --git a/zenoh/src/handlers.rs b/zenoh/src/handlers.rs index 69828a5d7f..e5ec3bb0dc 100644 --- a/zenoh/src/handlers.rs +++ b/zenoh/src/handlers.rs @@ -17,34 +17,36 @@ use crate::API_DATA_RECEPTION_CHANNEL_SIZE; /// An alias for `Arc`. pub type Dyn = std::sync::Arc; + /// An immutable callback function. pub type Callback<'a, T> = Dyn; -/// A type that can be converted into a [`Callback`]-receiver pair. +/// A type that can be converted into a [`Callback`]-handler pair. /// /// When Zenoh functions accept types that implement these, it intends to use the [`Callback`] as just that, -/// while granting you access to the receiver through the returned value via [`std::ops::Deref`] and [`std::ops::DerefMut`]. +/// while granting you access to the handler through the returned value via [`std::ops::Deref`] and [`std::ops::DerefMut`]. 
/// /// Any closure that accepts `T` can be converted into a pair of itself and `()`. -pub trait IntoCallbackReceiverPair<'a, T> { - type Receiver; - fn into_cb_receiver_pair(self) -> (Callback<'a, T>, Self::Receiver); +pub trait IntoHandler<'a, T> { + type Handler; + + fn into_handler(self) -> (Callback<'a, T>, Self::Handler); } -impl<'a, T, F> IntoCallbackReceiverPair<'a, T> for F + +impl<'a, T, F> IntoHandler<'a, T> for F where F: Fn(T) + Send + Sync + 'a, { - type Receiver = (); - fn into_cb_receiver_pair(self) -> (Callback<'a, T>, Self::Receiver) { + type Handler = (); + fn into_handler(self) -> (Callback<'a, T>, Self::Handler) { (Dyn::from(self), ()) } } -impl IntoCallbackReceiverPair<'static, T> - for (flume::Sender, flume::Receiver) -{ - type Receiver = flume::Receiver; - fn into_cb_receiver_pair(self) -> (Callback<'static, T>, Self::Receiver) { +impl IntoHandler<'static, T> for (flume::Sender, flume::Receiver) { + type Handler = flume::Receiver; + + fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { let (sender, receiver) = self; ( Dyn::new(move |t| { @@ -56,18 +58,24 @@ impl IntoCallbackReceiverPair<'static, T> ) } } + +/// The default handler in Zenoh is a FIFO queue. pub struct DefaultHandler; -impl IntoCallbackReceiverPair<'static, T> for DefaultHandler { - type Receiver = flume::Receiver; - fn into_cb_receiver_pair(self) -> (Callback<'static, T>, Self::Receiver) { - flume::bounded(*API_DATA_RECEPTION_CHANNEL_SIZE).into_cb_receiver_pair() + +impl IntoHandler<'static, T> for DefaultHandler { + type Handler = flume::Receiver; + + fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { + flume::bounded(*API_DATA_RECEPTION_CHANNEL_SIZE).into_handler() } } -impl IntoCallbackReceiverPair<'static, T> + +impl IntoHandler<'static, T> for (std::sync::mpsc::SyncSender, std::sync::mpsc::Receiver) { - type Receiver = std::sync::mpsc::Receiver; - fn into_cb_receiver_pair(self) -> (Callback<'static, T>, Self::Receiver) { + type Handler = std::sync::mpsc::Receiver; + + fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { let (sender, receiver) = self; ( Dyn::new(move |t| { @@ -96,7 +104,7 @@ pub fn locked(fnmut: impl FnMut(T)) -> impl Fn(T) { /// - `callback` will never be called once `drop` has started. /// - `drop` will only be called **once**, and **after every** `callback` has ended. /// - The two previous guarantees imply that `call` and `drop` are never called concurrently. 
-pub struct CallbackPair +pub struct CallbackDrop where DropFn: FnMut() + Send + Sync + 'static, { @@ -104,7 +112,7 @@ where pub drop: DropFn, } -impl Drop for CallbackPair +impl Drop for CallbackDrop where DropFn: FnMut() + Send + Sync + 'static, { @@ -113,14 +121,14 @@ where } } -impl<'a, OnEvent, Event, DropFn> IntoCallbackReceiverPair<'a, Event> - for CallbackPair +impl<'a, OnEvent, Event, DropFn> IntoHandler<'a, Event> for CallbackDrop where OnEvent: Fn(Event) + Send + Sync + 'a, DropFn: FnMut() + Send + Sync + 'static, { - type Receiver = (); - fn into_cb_receiver_pair(self) -> (Callback<'a, Event>, Self::Receiver) { + type Handler = (); + + fn into_handler(self) -> (Callback<'a, Event>, Self::Handler) { (Dyn::from(move |evt| (self.callback)(evt)), ()) } } diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index d4229db4cc..4103504f13 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -484,7 +484,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the samples for this subscription with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). + /// Receive the samples for this subscription with a [`Handler`](crate::prelude::IntoHandler). /// /// # Examples /// ```no_run @@ -507,7 +507,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { #[zenoh_macros::unstable] pub fn with(self, handler: Handler) -> LivelinessSubscriberBuilder<'a, 'b, Handler> where - Handler: crate::handlers::IntoCallbackReceiverPair<'static, Sample>, + Handler: crate::handlers::IntoHandler<'static, Sample>, { let LivelinessSubscriberBuilder { session, @@ -525,23 +525,23 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { #[zenoh_macros::unstable] impl<'a, Handler> Resolvable for LivelinessSubscriberBuilder<'a, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { - type To = ZResult>; + type To = ZResult>; } #[zenoh_macros::unstable] impl<'a, Handler> SyncResolve for LivelinessSubscriberBuilder<'a, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { #[zenoh_macros::unstable] fn res_sync(self) -> ::To { let key_expr = self.key_expr?; let session = self.session; - let (callback, receiver) = self.handler.into_cb_receiver_pair(); + let (callback, receiver) = self.handler.into_handler(); session .declare_subscriber_inner( &key_expr, @@ -564,8 +564,8 @@ where #[zenoh_macros::unstable] impl<'a, Handler> AsyncResolve for LivelinessSubscriberBuilder<'a, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { type Future = Ready; @@ -677,7 +677,7 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the replies for this query with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). + /// Receive the replies for this query with a [`Handler`](crate::prelude::IntoHandler). 
/// /// # Examples /// ``` @@ -700,7 +700,7 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { #[inline] pub fn with(self, handler: Handler) -> LivelinessGetBuilder<'a, 'b, Handler> where - Handler: IntoCallbackReceiverPair<'static, Reply>, + Handler: IntoHandler<'static, Reply>, { let LivelinessGetBuilder { session, @@ -728,19 +728,19 @@ impl<'a, 'b, Handler> LivelinessGetBuilder<'a, 'b, Handler> { impl Resolvable for LivelinessGetBuilder<'_, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Reply> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Reply> + Send, + Handler::Handler: Send, { - type To = ZResult; + type To = ZResult; } impl SyncResolve for LivelinessGetBuilder<'_, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Reply> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Reply> + Send, + Handler::Handler: Send, { fn res_sync(self) -> ::To { - let (callback, receiver) = self.handler.into_cb_receiver_pair(); + let (callback, receiver) = self.handler.into_handler(); self.session .query( @@ -761,8 +761,8 @@ where impl AsyncResolve for LivelinessGetBuilder<'_, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Reply> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Reply> + Send, + Handler::Handler: Send, { type Future = Ready; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 177906e9b1..26c93e1801 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -37,11 +37,11 @@ pub(crate) mod common { pub use zenoh_protocol::core::{EntityGlobalId, EntityId}; pub use crate::config::{self, Config, ValidatedMap}; - pub use crate::handlers::IntoCallbackReceiverPair; + pub use crate::handlers::IntoHandler; + pub use crate::selector::{Parameter, Parameters, Selector}; pub use crate::session::{Session, SessionDeclarations}; pub use crate::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; - pub use crate::selector::{Parameter, Parameters, Selector}; pub use crate::encoding::Encoding; /// The encoding of a zenoh `Value`. diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 1531cab606..392c0bf8c1 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -22,7 +22,7 @@ use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] use crate::{ - handlers::{Callback, DefaultHandler, IntoCallbackReceiverPair}, + handlers::{Callback, DefaultHandler, IntoHandler}, Id, }; use std::future::Ready; @@ -1180,7 +1180,7 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { self.callback(crate::handlers::locked(callback)) } - /// Receive the MatchingStatuses for this listener with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). + /// Receive the MatchingStatuses for this listener with a [`Handler`](crate::prelude::IntoHandler). 
/// /// # Examples /// ```no_run @@ -1208,7 +1208,7 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { #[zenoh_macros::unstable] pub fn with(self, handler: Handler) -> MatchingListenerBuilder<'a, Handler> where - Handler: crate::prelude::IntoCallbackReceiverPair<'static, MatchingStatus>, + Handler: crate::prelude::IntoHandler<'static, MatchingStatus>, { let MatchingListenerBuilder { publisher, @@ -1221,21 +1221,21 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { #[zenoh_macros::unstable] impl<'a, Handler> Resolvable for MatchingListenerBuilder<'a, Handler> where - Handler: IntoCallbackReceiverPair<'static, MatchingStatus> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, MatchingStatus> + Send, + Handler::Handler: Send, { - type To = ZResult>; + type To = ZResult>; } #[zenoh_macros::unstable] impl<'a, Handler> SyncResolve for MatchingListenerBuilder<'a, Handler> where - Handler: IntoCallbackReceiverPair<'static, MatchingStatus> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, MatchingStatus> + Send, + Handler::Handler: Send, { #[zenoh_macros::unstable] fn res_sync(self) -> ::To { - let (callback, receiver) = self.handler.into_cb_receiver_pair(); + let (callback, receiver) = self.handler.into_handler(); self.publisher .session .declare_matches_listener_inner(&self.publisher, callback) @@ -1253,8 +1253,8 @@ where #[zenoh_macros::unstable] impl<'a, Handler> AsyncResolve for MatchingListenerBuilder<'a, Handler> where - Handler: IntoCallbackReceiverPair<'static, MatchingStatus> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, MatchingStatus> + Send, + Handler::Handler: Send, { type Future = Ready; diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index a848913c7a..fe48748ad4 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -209,7 +209,7 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the replies for this query with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). + /// Receive the replies for this query with a [`Handler`](crate::prelude::IntoHandler). 
/// /// # Examples /// ``` @@ -231,7 +231,7 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { #[inline] pub fn with(self, handler: Handler) -> GetBuilder<'a, 'b, Handler> where - Handler: IntoCallbackReceiverPair<'static, Reply>, + Handler: IntoHandler<'static, Reply>, { let GetBuilder { session, @@ -362,19 +362,19 @@ impl Default for ReplyKeyExpr { impl Resolvable for GetBuilder<'_, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Reply> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Reply> + Send, + Handler::Handler: Send, { - type To = ZResult; + type To = ZResult; } impl SyncResolve for GetBuilder<'_, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Reply> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Reply> + Send, + Handler::Handler: Send, { fn res_sync(self) -> ::To { - let (callback, receiver) = self.handler.into_cb_receiver_pair(); + let (callback, receiver) = self.handler.into_handler(); self.session .query( @@ -395,8 +395,8 @@ where impl AsyncResolve for GetBuilder<'_, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Reply> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Reply> + Send, + Handler::Handler: Send, { type Future = Ready; diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index d98df046b7..d2eabcdc2a 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -599,7 +599,7 @@ impl<'a, 'b> QueryableBuilder<'a, 'b, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the queries for this Queryable with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). + /// Receive the queries for this Queryable with a [`Handler`](crate::prelude::IntoHandler). /// /// # Examples /// ```no_run @@ -621,7 +621,7 @@ impl<'a, 'b> QueryableBuilder<'a, 'b, DefaultHandler> { #[inline] pub fn with(self, handler: Handler) -> QueryableBuilder<'a, 'b, Handler> where - Handler: crate::prelude::IntoCallbackReceiverPair<'static, Query>, + Handler: crate::prelude::IntoHandler<'static, Query>, { let QueryableBuilder { session, @@ -657,7 +657,7 @@ impl<'a, 'b, Handler> QueryableBuilder<'a, 'b, Handler> { } } -/// A queryable that provides data through a [`Handler`](crate::prelude::IntoCallbackReceiverPair). +/// A queryable that provides data through a [`Handler`](crate::prelude::IntoHandler). 
/// /// Queryables can be created from a zenoh [`Session`] /// with the [`declare_queryable`](crate::Session::declare_queryable) function @@ -740,20 +740,20 @@ impl Deref for Queryable<'_, Receiver> { impl<'a, Handler> Resolvable for QueryableBuilder<'a, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Query> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Query> + Send, + Handler::Handler: Send, { - type To = ZResult>; + type To = ZResult>; } impl<'a, Handler> SyncResolve for QueryableBuilder<'a, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Query> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Query> + Send, + Handler::Handler: Send, { fn res_sync(self) -> ::To { let session = self.session; - let (callback, receiver) = self.handler.into_cb_receiver_pair(); + let (callback, receiver) = self.handler.into_handler(); session .declare_queryable_inner( &self.key_expr?.to_wire(&session), @@ -774,8 +774,8 @@ where impl<'a, Handler> AsyncResolve for QueryableBuilder<'a, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Query> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Query> + Send, + Handler::Handler: Send, { type Future = Ready; diff --git a/zenoh/src/scouting.rs b/zenoh/src/scouting.rs index ea09823ea1..470e2f1c61 100644 --- a/zenoh/src/scouting.rs +++ b/zenoh/src/scouting.rs @@ -115,7 +115,7 @@ impl ScoutBuilder { self.callback(locked(callback)) } - /// Receive the [`Hello`] messages from this scout with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). + /// Receive the [`Hello`] messages from this scout with a [`Handler`](crate::prelude::IntoHandler). /// /// # Examples /// ```no_run @@ -136,7 +136,7 @@ impl ScoutBuilder { #[inline] pub fn with(self, handler: Handler) -> ScoutBuilder where - Handler: crate::prelude::IntoCallbackReceiverPair<'static, Hello>, + Handler: crate::prelude::IntoHandler<'static, Hello>, { let ScoutBuilder { what, @@ -153,27 +153,27 @@ impl ScoutBuilder { impl Resolvable for ScoutBuilder where - Handler: crate::prelude::IntoCallbackReceiverPair<'static, Hello> + Send, - Handler::Receiver: Send, + Handler: crate::prelude::IntoHandler<'static, Hello> + Send, + Handler::Handler: Send, { - type To = ZResult>; + type To = ZResult>; } impl SyncResolve for ScoutBuilder where - Handler: crate::prelude::IntoCallbackReceiverPair<'static, Hello> + Send, - Handler::Receiver: Send, + Handler: crate::prelude::IntoHandler<'static, Hello> + Send, + Handler::Handler: Send, { fn res_sync(self) -> ::To { - let (callback, receiver) = self.handler.into_cb_receiver_pair(); + let (callback, receiver) = self.handler.into_handler(); scout(self.what, self.config?, callback).map(|scout| Scout { scout, receiver }) } } impl AsyncResolve for ScoutBuilder where - Handler: crate::prelude::IntoCallbackReceiverPair<'static, Hello> + Send, - Handler::Receiver: Send, + Handler: crate::prelude::IntoHandler<'static, Hello> + Send, + Handler::Handler: Send, { type Future = Ready; @@ -231,7 +231,7 @@ impl fmt::Debug for ScoutInner { } } -/// A scout that returns [`Hello`] messages through a [`Handler`](crate::prelude::IntoCallbackReceiverPair). +/// A scout that returns [`Hello`] messages through a [`Handler`](crate::prelude::IntoHandler). /// /// # Examples /// ```no_run diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index d4c3257472..413c9201f2 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -13,7 +13,7 @@ // //! Subscribing primitives. 
-use crate::handlers::{locked, Callback, DefaultHandler, IntoCallbackReceiverPair}; +use crate::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::key_expr::KeyExpr; use crate::prelude::Locality; use crate::sample::Sample; @@ -392,7 +392,7 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the samples for this subscription with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). + /// Receive the samples for this subscription with a [`Handler`](crate::prelude::IntoHandler). /// /// # Examples /// ```no_run @@ -414,7 +414,7 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { #[inline] pub fn with(self, handler: Handler) -> SubscriberBuilder<'a, 'b, Mode, Handler> where - Handler: crate::prelude::IntoCallbackReceiverPair<'static, Sample>, + Handler: crate::prelude::IntoHandler<'static, Sample>, { let SubscriberBuilder { session, @@ -511,21 +511,21 @@ impl<'a, 'b, Mode, Handler> SubscriberBuilder<'a, 'b, Mode, Handler> { // Push mode impl<'a, Handler> Resolvable for SubscriberBuilder<'a, '_, PushMode, Handler> where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { - type To = ZResult>; + type To = ZResult>; } impl<'a, Handler> SyncResolve for SubscriberBuilder<'a, '_, PushMode, Handler> where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { fn res_sync(self) -> ::To { let key_expr = self.key_expr?; let session = self.session; - let (callback, receiver) = self.handler.into_cb_receiver_pair(); + let (callback, receiver) = self.handler.into_handler(); session .declare_subscriber_inner( &key_expr, @@ -550,8 +550,8 @@ where impl<'a, Handler> AsyncResolve for SubscriberBuilder<'a, '_, PushMode, Handler> where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { type Future = Ready; @@ -563,21 +563,21 @@ where // Pull mode impl<'a, Handler> Resolvable for SubscriberBuilder<'a, '_, PullMode, Handler> where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { - type To = ZResult>; + type To = ZResult>; } impl<'a, Handler> SyncResolve for SubscriberBuilder<'a, '_, PullMode, Handler> where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { fn res_sync(self) -> ::To { let key_expr = self.key_expr?; let session = self.session; - let (callback, receiver) = self.handler.into_cb_receiver_pair(); + let (callback, receiver) = self.handler.into_handler(); session .declare_subscriber_inner( &key_expr, @@ -604,8 +604,8 @@ where impl<'a, Handler> AsyncResolve for SubscriberBuilder<'a, '_, PullMode, Handler> where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { type Future = Ready; @@ -614,7 +614,7 @@ where } } -/// A subscriber that provides data through a [`Handler`](crate::prelude::IntoCallbackReceiverPair). +/// A subscriber that provides data through a [`Handler`](crate::prelude::IntoHandler). 
/// /// Subscribers can be created from a zenoh [`Session`](crate::Session) /// with the [`declare_subscriber`](crate::SessionDeclarations::declare_subscriber) function @@ -647,7 +647,7 @@ pub struct Subscriber<'a, Receiver> { pub receiver: Receiver, } -/// A [`PullMode`] subscriber that provides data through a [`Handler`](crate::prelude::IntoCallbackReceiverPair). +/// A [`PullMode`] subscriber that provides data through a [`Handler`](crate::prelude::IntoHandler). /// /// PullSubscribers only provide data when explicitely pulled by the /// application with the [`pull`](PullSubscriber::pull) function. From 665c90f0b326b125658267b2e50c4a6b43b3a42a Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 18 Mar 2024 18:10:32 +0100 Subject: [PATCH 022/357] Optimize zint encode/decode (#838) * Rebase on protocol_changes * Fix rebase conflict --- commons/zenoh-codec/src/core/zint.rs | 52 ++++++++++++++++------------ commons/zenoh-codec/tests/codec.rs | 21 ++++++++++- 2 files changed, 50 insertions(+), 23 deletions(-) diff --git a/commons/zenoh-codec/src/core/zint.rs b/commons/zenoh-codec/src/core/zint.rs index 1c2f5a28e4..0daff7348b 100644 --- a/commons/zenoh-codec/src/core/zint.rs +++ b/commons/zenoh-codec/src/core/zint.rs @@ -17,7 +17,7 @@ use zenoh_buffers::{ writer::{DidntWrite, Writer}, }; -const VLE_LEN: usize = 10; +const VLE_LEN: usize = 9; impl LCodec for Zenoh080 { fn w_len(self, x: u64) -> usize { @@ -29,7 +29,6 @@ impl LCodec for Zenoh080 { const B6: u64 = u64::MAX << (7 * 6); const B7: u64 = u64::MAX << (7 * 7); const B8: u64 = u64::MAX << (7 * 8); - const B9: u64 = u64::MAX << (7 * 9); if (x & B1) == 0 { 1 @@ -47,10 +46,8 @@ impl LCodec for Zenoh080 { 7 } else if (x & B8) == 0 { 8 - } else if (x & B9) == 0 { - 9 } else { - 10 + 9 } } } @@ -112,15 +109,31 @@ where fn write(self, writer: &mut W, mut x: u64) -> Self::Output { writer.with_slot(VLE_LEN, move |buffer| { let mut len = 0; - let mut b = x as u8; - while x > 0x7f { - buffer[len] = b | 0x80; + while (x & !0x7f_u64) != 0 { + // SAFETY: buffer is guaranteed to be VLE_LEN long where VLE_LEN is + // the maximum number of bytes a VLE can take once encoded. + // I.e.: x is shifted 7 bits to the right every iteration, + // the loop is at most VLE_LEN iterations. + unsafe { + *buffer.get_unchecked_mut(len) = (x as u8) | 0x80_u8; + } len += 1; x >>= 7; - b = x as u8; } - buffer[len] = b; - len + 1 + // In case len == VLE_LEN then all the bits have already been written in the latest iteration. + // Else we haven't written all the necessary bytes yet. + if len != VLE_LEN { + // SAFETY: buffer is guaranteed to be VLE_LEN long where VLE_LEN is + // the maximum number of bytes a VLE can take once encoded. + // I.e.: x is shifted 7 bits to the right every iteration, + // the loop is at most VLE_LEN iterations. 
+ unsafe { + *buffer.get_unchecked_mut(len) = x as u8; + } + len += 1; + } + // The number of written bytes + len })?; Ok(()) } @@ -137,19 +150,14 @@ where let mut v = 0; let mut i = 0; - let mut k = VLE_LEN; - while b > 0x7f && k > 0 { - v |= ((b & 0x7f) as u64) << i; - i += 7; + // 7 * VLE_LEN is beyond the maximum number of shift bits + while (b & 0x80_u8) != 0 && i != 7 * (VLE_LEN - 1) { + v |= ((b & 0x7f_u8) as u64) << i; b = reader.read_u8()?; - k -= 1; - } - if k > 0 { - v |= ((b & 0x7f) as u64) << i; - Ok(v) - } else { - Err(DidntRead) + i += 7; } + v |= (b as u64) << i; + Ok(v) } } diff --git a/commons/zenoh-codec/tests/codec.rs b/commons/zenoh-codec/tests/codec.rs index 7f23214b49..3bca8b7489 100644 --- a/commons/zenoh-codec/tests/codec.rs +++ b/commons/zenoh-codec/tests/codec.rs @@ -121,10 +121,28 @@ macro_rules! run { // Core #[test] fn codec_zint() { + run!(u8, { u8::MIN }); + run!(u8, { u8::MAX }); run!(u8, { thread_rng().gen::() }); + + run!(u16, { u16::MIN }); + run!(u16, { u16::MAX }); run!(u16, { thread_rng().gen::() }); + + run!(u32, { u32::MIN }); + run!(u32, { u32::MAX }); run!(u32, { thread_rng().gen::() }); + + run!(u64, { u64::MIN }); + run!(u64, { u64::MAX }); + let codec = Zenoh080::new(); + for i in 1..=codec.w_len(u64::MAX) { + run!(u64, { 1 << (7 * i) }); + } run!(u64, { thread_rng().gen::() }); + + run!(usize, { usize::MIN }); + run!(usize, { usize::MAX }); run!(usize, thread_rng().gen::()); } @@ -138,11 +156,12 @@ fn codec_zint_len() { codec.write(&mut writer, n).unwrap(); assert_eq!(codec.w_len(n), buff.len()); - for i in 1..=9 { + for i in 1..=codec.w_len(u64::MAX) { let mut buff = vec![]; let mut writer = buff.writer(); let n: u64 = 1 << (7 * i); codec.write(&mut writer, n).unwrap(); + println!("ZInt len: {} {:02x?}", n, buff); assert_eq!(codec.w_len(n), buff.len()); } From 7300f4c8fe1c1fd89f1109d5091a642c3c51c298 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 19 Mar 2024 14:35:17 +0100 Subject: [PATCH 023/357] Remove pull API and protocol support (#821) * Remove Pull subscriber * Fix doctest. Remove unused code. 
* Remove routing code for pull subscriptions * Remove pull mode from DeclareSubscriber * Remove unsupported Put/Del in Request/Response (#839) * Address review comments --------- Co-authored-by: OlivierHecart --- Cargo.lock | 1 + commons/zenoh-codec/src/zenoh/mod.rs | 9 - commons/zenoh-codec/src/zenoh/pull.rs | 93 ----- commons/zenoh-codec/tests/codec.rs | 5 - commons/zenoh-collections/src/ring_buffer.rs | 9 + commons/zenoh-protocol/src/network/declare.rs | 48 +-- commons/zenoh-protocol/src/zenoh/mod.rs | 27 +- commons/zenoh-protocol/src/zenoh/pull.rs | 56 --- examples/Cargo.toml | 1 + examples/examples/z_pull.rs | 72 ++-- io/zenoh-transport/src/shm.rs | 8 +- zenoh-ext/src/subscriber_ext.rs | 6 +- zenoh/src/liveliness.rs | 1 - zenoh/src/net/routing/dispatcher/face.rs | 6 - zenoh/src/net/routing/dispatcher/pubsub.rs | 263 +++----------- zenoh/src/net/routing/dispatcher/queries.rs | 16 - zenoh/src/net/routing/dispatcher/resource.rs | 15 - zenoh/src/net/routing/dispatcher/tables.rs | 2 - zenoh/src/net/routing/hat/client/mod.rs | 15 +- zenoh/src/net/routing/hat/client/pubsub.rs | 34 +- zenoh/src/net/routing/hat/client/queries.rs | 3 +- .../src/net/routing/hat/linkstate_peer/mod.rs | 15 +- .../net/routing/hat/linkstate_peer/pubsub.rs | 35 +- .../net/routing/hat/linkstate_peer/queries.rs | 1 - zenoh/src/net/routing/hat/p2p_peer/mod.rs | 15 +- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 34 +- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 3 +- zenoh/src/net/routing/hat/router/mod.rs | 15 +- zenoh/src/net/routing/hat/router/pubsub.rs | 37 +- zenoh/src/net/routing/hat/router/queries.rs | 1 - zenoh/src/net/runtime/adminspace.rs | 96 ++--- zenoh/src/net/tests/tables.rs | 5 - zenoh/src/session.rs | 43 +-- zenoh/src/subscriber.rs | 331 +----------------- 34 files changed, 242 insertions(+), 1079 deletions(-) delete mode 100644 commons/zenoh-codec/src/zenoh/pull.rs delete mode 100644 commons/zenoh-protocol/src/zenoh/pull.rs diff --git a/Cargo.lock b/Cargo.lock index 53f2600071..fa55ca4acd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4534,6 +4534,7 @@ dependencies = [ "rand 0.8.5", "rustc_version 0.4.0", "zenoh", + "zenoh-collections", "zenoh-ext", ] diff --git a/commons/zenoh-codec/src/zenoh/mod.rs b/commons/zenoh-codec/src/zenoh/mod.rs index 0d7146dc90..dc38e5ee84 100644 --- a/commons/zenoh-codec/src/zenoh/mod.rs +++ b/commons/zenoh-codec/src/zenoh/mod.rs @@ -13,7 +13,6 @@ // pub mod del; pub mod err; -pub mod pull; pub mod put; pub mod query; pub mod reply; @@ -81,9 +80,6 @@ where fn write(self, writer: &mut W, x: &RequestBody) -> Self::Output { match x { RequestBody::Query(b) => self.write(&mut *writer, b), - RequestBody::Put(b) => self.write(&mut *writer, b), - RequestBody::Del(b) => self.write(&mut *writer, b), - RequestBody::Pull(b) => self.write(&mut *writer, b), } } } @@ -100,9 +96,6 @@ where let codec = Zenoh080Header::new(header); let body = match imsg::mid(codec.header) { id::QUERY => RequestBody::Query(codec.read(&mut *reader)?), - id::PUT => RequestBody::Put(codec.read(&mut *reader)?), - id::DEL => RequestBody::Del(codec.read(&mut *reader)?), - id::PULL => RequestBody::Pull(codec.read(&mut *reader)?), _ => return Err(DidntRead), }; @@ -121,7 +114,6 @@ where match x { ResponseBody::Reply(b) => self.write(&mut *writer, b), ResponseBody::Err(b) => self.write(&mut *writer, b), - ResponseBody::Put(b) => self.write(&mut *writer, b), } } } @@ -139,7 +131,6 @@ where let body = match imsg::mid(codec.header) { id::REPLY => ResponseBody::Reply(codec.read(&mut *reader)?), id::ERR => 
ResponseBody::Err(codec.read(&mut *reader)?), - id::PUT => ResponseBody::Put(codec.read(&mut *reader)?), _ => return Err(DidntRead), }; diff --git a/commons/zenoh-codec/src/zenoh/pull.rs b/commons/zenoh-codec/src/zenoh/pull.rs deleted file mode 100644 index dc71901d58..0000000000 --- a/commons/zenoh-codec/src/zenoh/pull.rs +++ /dev/null @@ -1,93 +0,0 @@ -// -// Copyright (c) 2022 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; -use alloc::vec::Vec; -use zenoh_buffers::{ - reader::{DidntRead, Reader}, - writer::{DidntWrite, Writer}, -}; - -use zenoh_protocol::{ - common::imsg, - zenoh::{ - id, - pull::{flag, Pull}, - }, -}; - -impl WCodec<&Pull, &mut W> for Zenoh080 -where - W: Writer, -{ - type Output = Result<(), DidntWrite>; - - fn write(self, writer: &mut W, x: &Pull) -> Self::Output { - let Pull { ext_unknown } = x; - - // Header - let mut header = id::PULL; - let mut n_exts = ext_unknown.len() as u8; - if n_exts != 0 { - header |= flag::Z; - } - self.write(&mut *writer, header)?; - - // Extensions - for u in ext_unknown.iter() { - n_exts -= 1; - self.write(&mut *writer, (u, n_exts != 0))?; - } - - Ok(()) - } -} - -impl RCodec for Zenoh080 -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - let header: u8 = self.read(&mut *reader)?; - let codec = Zenoh080Header::new(header); - codec.read(reader) - } -} - -impl RCodec for Zenoh080Header -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - if imsg::mid(self.header) != id::PULL { - return Err(DidntRead); - } - - // Extensions - let mut ext_unknown = Vec::new(); - - let mut has_ext = imsg::has_flag(self.header, flag::Z); - while has_ext { - let ext: u8 = self.codec.read(&mut *reader)?; - let (u, ext) = extension::read(reader, "Pull", ext)?; - ext_unknown.push(u); - has_ext = ext; - } - - Ok(Pull { ext_unknown }) - } -} diff --git a/commons/zenoh-codec/tests/codec.rs b/commons/zenoh-codec/tests/codec.rs index 3bca8b7489..2f0e870c4f 100644 --- a/commons/zenoh-codec/tests/codec.rs +++ b/commons/zenoh-codec/tests/codec.rs @@ -600,8 +600,3 @@ fn codec_reply() { fn codec_err() { run!(zenoh::Err, zenoh::Err::rand()); } - -#[test] -fn codec_pull() { - run!(zenoh::Pull, zenoh::Pull::rand()); -} diff --git a/commons/zenoh-collections/src/ring_buffer.rs b/commons/zenoh-collections/src/ring_buffer.rs index fd60030ebc..e9f7909d5f 100644 --- a/commons/zenoh-collections/src/ring_buffer.rs +++ b/commons/zenoh-collections/src/ring_buffer.rs @@ -40,6 +40,15 @@ impl RingBuffer { Some(elem) } + #[inline] + pub fn push_force(&mut self, elem: T) -> Option { + self.push(elem).and_then(|elem| { + let ret = self.buffer.pop_front(); + self.buffer.push_back(elem); + ret + }) + } + #[inline] pub fn pull(&mut self) -> Option { let x = self.buffer.pop_front(); diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index 2dd8de4ef8..187fa87662 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -146,31 +146,6 @@ impl Declare { 
} } -#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)] -#[repr(u8)] -pub enum Mode { - #[default] - Push, - Pull, -} - -impl Mode { - pub const DEFAULT: Self = Self::Push; - - #[cfg(feature = "test")] - fn rand() -> Self { - use rand::Rng; - - let mut rng = rand::thread_rng(); - - if rng.gen_bool(0.5) { - Mode::Push - } else { - Mode::Pull - } - } -} - pub mod common { use super::*; @@ -320,9 +295,7 @@ pub mod subscriber { /// ~ [decl_exts] ~ if Z==1 /// +---------------+ /// - /// - if R==1 then the subscription is reliable, else it is best effort - /// - if P==1 then the subscription is pull, else it is push - /// + /// - if R==1 then the subscription is reliable, else it is best effort /// /// ``` #[derive(Debug, Clone, PartialEq, Eq)] pub struct DeclareSubscriber { @@ -343,34 +316,29 @@ pub mod subscriber { /// +-+-+-+-+-+-+-+-+ /// |Z|0_1| ID | /// +-+-+-+---------+ - /// % reserved |P|R% + /// % reserved |R% /// +---------------+ /// /// - if R==1 then the subscription is reliable, else it is best effort - /// - if P==1 then the subscription is pull, else it is push /// - rsv: Reserved /// ``` #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct SubscriberInfo { pub reliability: Reliability, - pub mode: Mode, } impl SubscriberInfo { pub const R: u64 = 1; - pub const P: u64 = 1 << 1; pub const DEFAULT: Self = Self { reliability: Reliability::DEFAULT, - mode: Mode::DEFAULT, }; #[cfg(feature = "test")] pub fn rand() -> Self { let reliability = Reliability::rand(); - let mode = Mode::rand(); - Self { reliability, mode } + Self { reliability } } } @@ -387,12 +355,7 @@ pub mod subscriber { } else { Reliability::BestEffort }; - let mode = if imsg::has_option(ext.value, SubscriberInfo::P) { - Mode::Pull - } else { - Mode::Push - }; - Self { reliability, mode } + Self { reliability } } } @@ -402,9 +365,6 @@ pub mod subscriber { if ext.reliability == Reliability::Reliable { v |= SubscriberInfo::R; } - if ext.mode == Mode::Pull { - v |= SubscriberInfo::P; - } Info::new(v) } } diff --git a/commons/zenoh-protocol/src/zenoh/mod.rs b/commons/zenoh-protocol/src/zenoh/mod.rs index 3e5d573c43..7bca48f3ba 100644 --- a/commons/zenoh-protocol/src/zenoh/mod.rs +++ b/commons/zenoh-protocol/src/zenoh/mod.rs @@ -13,7 +13,6 @@ // pub mod del; pub mod err; -pub mod pull; pub mod put; pub mod query; pub mod reply; @@ -21,7 +20,6 @@ pub mod reply; use crate::core::Encoding; pub use del::Del; pub use err::Err; -pub use pull::Pull; pub use put::Put; pub use query::{Consolidation, Query}; pub use reply::Reply; @@ -33,7 +31,6 @@ pub mod id { pub const QUERY: u8 = 0x03; pub const REPLY: u8 = 0x04; pub const ERR: u8 = 0x05; - pub const PULL: u8 = 0x06; } // DataInfo @@ -80,9 +77,6 @@ impl From for PushBody { #[derive(Debug, Clone, PartialEq, Eq)] pub enum RequestBody { Query(Query), - Put(Put), - Del(Del), - Pull(Pull), } impl RequestBody { @@ -92,11 +86,8 @@ impl RequestBody { let mut rng = rand::thread_rng(); - match rng.gen_range(0..4) { + match rng.gen_range(0..1) { 0 => RequestBody::Query(Query::rand()), - 1 => RequestBody::Put(Put::rand()), - 2 => RequestBody::Del(Del::rand()), - 3 => RequestBody::Pull(Pull::rand()), _ => unreachable!(), } } @@ -108,24 +99,11 @@ impl From for RequestBody { } } -impl From for RequestBody { - fn from(p: Put) -> RequestBody { - RequestBody::Put(p) - } -} - -impl From for RequestBody { - fn from(d: Del) -> RequestBody { - RequestBody::Del(d) - } -} - // Response #[derive(Debug, Clone, PartialEq, Eq)] pub enum ResponseBody { Reply(Reply), Err(Err), - Put(Put), } impl 
ResponseBody { @@ -134,10 +112,9 @@ impl ResponseBody { use rand::Rng; let mut rng = rand::thread_rng(); - match rng.gen_range(0..3) { + match rng.gen_range(0..2) { 0 => ResponseBody::Reply(Reply::rand()), 1 => ResponseBody::Err(Err::rand()), - 2 => ResponseBody::Put(Put::rand()), _ => unreachable!(), } } diff --git a/commons/zenoh-protocol/src/zenoh/pull.rs b/commons/zenoh-protocol/src/zenoh/pull.rs deleted file mode 100644 index eb4f7eb55e..0000000000 --- a/commons/zenoh-protocol/src/zenoh/pull.rs +++ /dev/null @@ -1,56 +0,0 @@ -// -// Copyright (c) 2022 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use crate::common::ZExtUnknown; -use alloc::vec::Vec; - -/// # Pull message -/// -/// ```text -/// Flags: -/// - X: Reserved -/// - X: Reserved -/// - Z: Extension If Z==1 then at least one extension is present -/// -/// 7 6 5 4 3 2 1 0 -/// +-+-+-+-+-+-+-+-+ -/// |Z|X|X| PULL | -/// +-+-+-+---------+ -/// ~ [pull_exts] ~ if Z==1 -/// +---------------+ -/// ``` -pub mod flag { - // pub const X: u8 = 1 << 5; // 0x20 Reserved - // pub const X: u8 = 1 << 6; // 0x40 Reserved - pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow -} - -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct Pull { - pub ext_unknown: Vec, -} - -impl Pull { - #[cfg(feature = "test")] - pub fn rand() -> Self { - use rand::Rng; - let mut rng = rand::thread_rng(); - - let mut ext_unknown = Vec::new(); - for _ in 0..rng.gen_range(0..4) { - ext_unknown.push(ZExtUnknown::rand2(1, false)); - } - - Self { ext_unknown } - } -} diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 190894fb18..b827ed2e7f 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -50,6 +50,7 @@ git-version = { workspace = true } json5 = { workspace = true } log = { workspace = true } zenoh = { workspace = true } +zenoh-collections = { workspace = true } zenoh-ext = { workspace = true } [dev-dependencies] diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index 910d7614cf..d2c9a5380b 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -13,9 +13,12 @@ // use async_std::task::sleep; use clap::Parser; -use std::time::Duration; -use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use std::{ + sync::{Arc, Mutex}, + time::Duration, +}; +use zenoh::{config::Config, prelude::r#async::*}; +use zenoh_collections::RingBuffer; use zenoh_examples::CommonArgs; #[async_std::main] @@ -23,50 +26,67 @@ async fn main() { // initiate logging env_logger::init(); - let (config, key_expr) = parse_args(); + let (config, key_expr, cache, interval) = parse_args(); println!("Opening session..."); let session = zenoh::open(config).res().await.unwrap(); - println!("Declaring Subscriber on '{key_expr}'..."); + println!("Creating a local queue keeping the last {cache} elements..."); + let arb = Arc::new(Mutex::new(RingBuffer::new(cache))); + let arb_c = arb.clone(); - let subscriber = session + println!("Declaring Subscriber on '{key_expr}'..."); + let _subscriber = session .declare_subscriber(&key_expr) - .pull_mode() - .callback(|sample| { - let payload = sample - .payload() - .deserialize::() - 
.unwrap_or_else(|e| format!("{}", e)); - println!( - ">> [Subscriber] Received {} ('{}': '{}')", - sample.kind(), - sample.key_expr().as_str(), - payload, - ); + .callback(move |sample| { + arb_c.lock().unwrap().push_force(sample); }) .res() .await .unwrap(); - println!("Press CTRL-C to quit..."); - for idx in 0..u32::MAX { - sleep(Duration::from_secs(1)).await; - println!("[{idx:4}] Pulling..."); - subscriber.pull().res().await.unwrap(); + println!("Pulling data every {:#?} seconds", interval); + loop { + let mut res = arb.lock().unwrap().pull(); + print!(">> [Subscriber] Pulling "); + match res.take() { + Some(sample) => { + let payload = sample + .payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)); + println!( + "{} ('{}': '{}')", + sample.kind(), + sample.key_expr().as_str(), + payload, + ); + } + None => { + println!("nothing... sleep for {:#?}", interval); + sleep(interval).await; + } + } } } -#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] +#[derive(clap::Parser, Clone, PartialEq, Debug)] struct SubArgs { #[arg(short, long, default_value = "demo/example/**")] /// The Key Expression to subscribe to. key: KeyExpr<'static>, + /// The size of the cache. + #[arg(long, default_value = "3")] + cache: usize, + /// The interval for pulling the cache. + #[arg(long, default_value = "5.0")] + interval: f32, #[command(flatten)] common: CommonArgs, } -fn parse_args() -> (Config, KeyExpr<'static>) { +fn parse_args() -> (Config, KeyExpr<'static>, usize, Duration) { let args = SubArgs::parse(); - (args.common.into(), args.key) + let interval = Duration::from_secs_f32(args.interval); + (args.common.into(), args.key, args.cache, interval) } diff --git a/io/zenoh-transport/src/shm.rs b/io/zenoh-transport/src/shm.rs index 31910f51ae..0dd6662286 100644 --- a/io/zenoh-transport/src/shm.rs +++ b/io/zenoh-transport/src/shm.rs @@ -140,12 +140,9 @@ pub fn map_zmsg_to_shminfo(msg: &mut NetworkMessage) -> ZResult { }, NetworkBody::Request(Request { payload, .. }) => match payload { RequestBody::Query(b) => b.map_to_shminfo(), - RequestBody::Put(b) => b.map_to_shminfo(), - RequestBody::Del(_) | RequestBody::Pull(_) => Ok(false), }, NetworkBody::Response(Response { payload, .. }) => match payload { ResponseBody::Reply(b) => b.map_to_shminfo(), - ResponseBody::Put(b) => b.map_to_shminfo(), ResponseBody::Err(b) => b.map_to_shminfo(), }, NetworkBody::ResponseFinal(_) | NetworkBody::Declare(_) | NetworkBody::OAM(_) => Ok(false), @@ -194,13 +191,10 @@ pub fn map_zmsg_to_shmbuf( }, NetworkBody::Request(Request { payload, .. }) => match payload { RequestBody::Query(b) => b.map_to_shmbuf(shmr), - RequestBody::Put(b) => b.map_to_shmbuf(shmr), - RequestBody::Del(_) | RequestBody::Pull(_) => Ok(false), }, NetworkBody::Response(Response { payload, .. 
}) => match payload { - ResponseBody::Put(b) => b.map_to_shmbuf(shmr), - ResponseBody::Err(b) => b.map_to_shmbuf(shmr), ResponseBody::Reply(b) => b.map_to_shmbuf(shmr), + ResponseBody::Err(b) => b.map_to_shmbuf(shmr), }, NetworkBody::ResponseFinal(_) | NetworkBody::Declare(_) | NetworkBody::OAM(_) => Ok(false), } diff --git a/zenoh-ext/src/subscriber_ext.rs b/zenoh-ext/src/subscriber_ext.rs index 89d3b5f691..6ac796efb1 100644 --- a/zenoh-ext/src/subscriber_ext.rs +++ b/zenoh-ext/src/subscriber_ext.rs @@ -21,7 +21,7 @@ use zenoh::{ liveliness::LivelinessSubscriberBuilder, prelude::Sample, query::{QueryConsolidation, QueryTarget}, - subscriber::{PushMode, Reliability, Subscriber, SubscriberBuilder}, + subscriber::{Reliability, Subscriber, SubscriberBuilder}, }; use crate::ExtractSample; @@ -122,9 +122,7 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { fn querying(self) -> QueryingSubscriberBuilder<'a, 'b, Self::KeySpace, Handler>; } -impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> - for SubscriberBuilder<'a, 'b, PushMode, Handler> -{ +impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> for SubscriberBuilder<'a, 'b, Handler> { type KeySpace = crate::UserSpace; /// Create a [`FetchingSubscriber`](super::FetchingSubscriber). diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index 4103504f13..425aa62592 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -398,7 +398,6 @@ impl Drop for LivelinessToken<'_> { /// let subscriber = session /// .declare_subscriber("key/expression") /// .best_effort() -/// .pull_mode() /// .res() /// .await /// .unwrap(); diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index 79c9da9127..cb565053c9 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -244,12 +244,6 @@ impl Primitives for Face { msg.ext_nodeid.node_id, ); } - RequestBody::Pull(_) => { - pull_data(&self.tables.tables, &self.state.clone(), msg.wire_expr); - } - _ => { - log::error!("{} Unsupported request!", self); - } } } diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index c0d1bb4a34..89c6c40206 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -12,17 +12,15 @@ // ZettaScale Zenoh Team, // use super::face::FaceState; -use super::resource::{DataRoutes, Direction, PullCaches, Resource}; +use super::resource::{DataRoutes, Direction, Resource}; use super::tables::{NodeId, Route, RoutingExpr, Tables, TablesLock}; use crate::net::routing::hat::HatTrait; -use std::borrow::Cow; use std::collections::HashMap; use std::sync::Arc; -use std::sync::RwLock; use zenoh_core::zread; -use zenoh_protocol::core::key_expr::{keyexpr, OwnedKeyExpr}; +use zenoh_protocol::core::key_expr::keyexpr; use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; -use zenoh_protocol::network::declare::{Mode, SubscriberId}; +use zenoh_protocol::network::declare::SubscriberId; use zenoh_protocol::{ core::{WhatAmI, WireExpr}, network::{declare::ext, Push}, @@ -83,13 +81,10 @@ pub(crate) fn declare_subscription( drop(rtables); let wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { + for (mut res, data_routes) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); } drop(wtables); } 
@@ -148,13 +143,10 @@ pub(crate) fn undeclare_subscription( drop(rtables); let wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { + for (mut res, data_routes) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); } Resource::clean(&mut res); drop(wtables); @@ -223,7 +215,6 @@ pub(crate) fn update_data_routes(tables: &Tables, res: &mut Arc) { pub(crate) fn update_data_routes_from(tables: &mut Tables, res: &mut Arc) { update_data_routes(tables, res); - update_matching_pulls(tables, res); let res = get_mut_unchecked(res); for child in res.childs.values_mut() { update_data_routes_from(tables, child); @@ -233,22 +224,17 @@ pub(crate) fn update_data_routes_from(tables: &mut Tables, res: &mut Arc( tables: &'a Tables, res: &'a Arc, -) -> Vec<(Arc, DataRoutes, Arc)> { +) -> Vec<(Arc, DataRoutes)> { let mut routes = vec![]; if res.context.is_some() { let mut expr = RoutingExpr::new(res, ""); - routes.push(( - res.clone(), - compute_data_routes(tables, &mut expr), - compute_matching_pulls(tables, &mut expr), - )); + routes.push((res.clone(), compute_data_routes(tables, &mut expr))); for match_ in &res.context().matches { let match_ = match_.upgrade().unwrap(); if !Arc::ptr_eq(&match_, res) { let mut expr = RoutingExpr::new(&match_, ""); let match_routes = compute_data_routes(tables, &mut expr); - let matching_pulls = compute_matching_pulls(tables, &mut expr); - routes.push((match_, match_routes, matching_pulls)); + routes.push((match_, match_routes)); } } } @@ -258,12 +244,10 @@ pub(crate) fn compute_matches_data_routes<'a>( pub(crate) fn update_matches_data_routes<'a>(tables: &'a mut Tables, res: &'a mut Arc) { if res.context.is_some() { update_data_routes(tables, res); - update_matching_pulls(tables, res); for match_ in &res.context().matches { let mut match_ = match_.upgrade().unwrap(); if !Arc::ptr_eq(&match_, res) { update_data_routes(tables, &mut match_); - update_matching_pulls(tables, &mut match_); } } } @@ -278,9 +262,6 @@ pub(crate) fn disable_matches_data_routes(_tables: &mut Tables, res: &mut Arc Arc { - let mut pull_caches = PullCaches::default(); - compute_matching_pulls_(tables, &mut pull_caches, expr); - Arc::new(pull_caches) -} - -pub(crate) fn update_matching_pulls(tables: &Tables, res: &mut Arc) { - if res.context.is_some() { - let mut res_mut = res.clone(); - let res_mut = get_mut_unchecked(&mut res_mut); - if res_mut.context_mut().matching_pulls.is_none() { - res_mut.context_mut().matching_pulls = Some(Arc::new(PullCaches::default())); - } - compute_matching_pulls_( - tables, - get_mut_unchecked(res_mut.context_mut().matching_pulls.as_mut().unwrap()), - &mut RoutingExpr::new(res, ""), - ); - } -} - -#[inline] -fn get_matching_pulls( - tables: &Tables, - res: &Option>, - expr: &mut RoutingExpr, -) -> Arc { - res.as_ref() - .and_then(|res| res.context.as_ref()) - .and_then(|ctx| ctx.matching_pulls.clone()) - .unwrap_or_else(|| compute_matching_pulls(tables, expr)) -} - -macro_rules! cache_data { - ( - $matching_pulls:expr, - $expr:expr, - $payload:expr - ) => { - for context in $matching_pulls.iter() { - get_mut_unchecked(&mut context.clone()) - .last_values - .insert($expr.full_expr().to_string(), $payload.clone()); - } - }; -} - #[cfg(feature = "stats")] macro_rules! 
inc_stats { ( @@ -497,12 +406,10 @@ pub fn full_reentrant_route_data( let route = get_data_route(&tables, face, &res, &mut expr, routing_context); - let matching_pulls = get_matching_pulls(&tables, &res, &mut expr); - - if !(route.is_empty() && matching_pulls.is_empty()) { + if !route.is_empty() { treat_timestamp!(&tables.hlc, payload, tables.drop_future_timestamp); - if route.len() == 1 && matching_pulls.len() == 0 { + if route.len() == 1 { let (outface, key_expr, context) = route.values().next().unwrap(); if tables .hat_code @@ -524,26 +431,43 @@ pub fn full_reentrant_route_data( payload, }) } - } else { - if !matching_pulls.is_empty() { - let lock = zlock!(tables.pull_caches_lock); - cache_data!(matching_pulls, expr, payload); - drop(lock); - } + } else if tables.whatami == WhatAmI::Router { + let route = route + .values() + .filter(|(outface, _key_expr, _context)| { + tables + .hat_code + .egress_filter(&tables, face, outface, &mut expr) + }) + .cloned() + .collect::>(); - if tables.whatami == WhatAmI::Router { - let route = route - .values() - .filter(|(outface, _key_expr, _context)| { - tables - .hat_code - .egress_filter(&tables, face, outface, &mut expr) - }) - .cloned() - .collect::>(); + drop(tables); + for (outface, key_expr, context) in route { + #[cfg(feature = "stats")] + if !admin { + inc_stats!(face, tx, user, payload) + } else { + inc_stats!(face, tx, admin, payload) + } - drop(tables); - for (outface, key_expr, context) in route { + outface.primitives.send_push(Push { + wire_expr: key_expr, + ext_qos, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { node_id: context }, + payload: payload.clone(), + }) + } + } else { + drop(tables); + for (outface, key_expr, context) in route.values() { + if face.id != outface.id + && match (face.mcast_group.as_ref(), outface.mcast_group.as_ref()) { + (Some(l), Some(r)) => l != r, + _ => true, + } + { #[cfg(feature = "stats")] if !admin { inc_stats!(face, tx, user, payload) @@ -552,41 +476,13 @@ pub fn full_reentrant_route_data( } outface.primitives.send_push(Push { - wire_expr: key_expr, + wire_expr: key_expr.into(), ext_qos, ext_tstamp: None, - ext_nodeid: ext::NodeIdType { node_id: context }, + ext_nodeid: ext::NodeIdType { node_id: *context }, payload: payload.clone(), }) } - } else { - drop(tables); - for (outface, key_expr, context) in route.values() { - if face.id != outface.id - && match ( - face.mcast_group.as_ref(), - outface.mcast_group.as_ref(), - ) { - (Some(l), Some(r)) => l != r, - _ => true, - } - { - #[cfg(feature = "stats")] - if !admin { - inc_stats!(face, tx, user, payload) - } else { - inc_stats!(face, tx, admin, payload) - } - - outface.primitives.send_push(Push { - wire_expr: key_expr.into(), - ext_qos, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { node_id: *context }, - payload: payload.clone(), - }) - } - } } } } @@ -597,68 +493,3 @@ pub fn full_reentrant_route_data( } } } - -pub fn pull_data(tables_ref: &RwLock, face: &Arc, expr: WireExpr) { - let tables = zread!(tables_ref); - match tables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - let res = get_mut_unchecked(&mut res); - match res.session_ctxs.get_mut(&face.id) { - Some(ctx) => match &ctx.subs { - Some(_subinfo) => { - // let reliability = subinfo.reliability; - let lock = zlock!(tables.pull_caches_lock); - let route = get_mut_unchecked(ctx) - .last_values - .drain() - .map(|(name, sample)| { - ( - Resource::get_best_key(&tables.root_res, &name, 
face.id) - .to_owned(), - sample, - ) - }) - .collect::>(); - drop(lock); - drop(tables); - for (key_expr, payload) in route { - face.primitives.send_push(Push { - wire_expr: key_expr, - ext_qos: ext::QoSType::PUSH, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - payload, - }); - } - } - None => { - log::error!( - "{} Pull data for unknown subscriber {} (no info)!", - face, - prefix.expr() + expr.suffix.as_ref() - ); - } - }, - None => { - log::error!( - "{} Pull data for unknown subscriber {} (no context)!", - face, - prefix.expr() + expr.suffix.as_ref() - ); - } - } - } - None => { - log::error!( - "{} Pull data for unknown subscriber {} (no resource)!", - face, - prefix.expr() + expr.suffix.as_ref() - ); - } - }, - None => { - log::error!("{} Pull data with unknown scope {}!", face, expr.scope); - } - }; -} diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 721a98b8c2..04262e555d 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -460,20 +460,12 @@ macro_rules! inc_req_stats { if let Some(stats) = $face.stats.as_ref() { use zenoh_buffers::buffer::Buffer; match &$body { - RequestBody::Put(p) => { - stats.[<$txrx _z_put_msgs>].[](1); - stats.[<$txrx _z_put_pl_bytes>].[](p.payload.len()); - } - RequestBody::Del(_) => { - stats.[<$txrx _z_del_msgs>].[](1); - } RequestBody::Query(q) => { stats.[<$txrx _z_query_msgs>].[](1); stats.[<$txrx _z_query_pl_bytes>].[]( q.ext_body.as_ref().map(|b| b.payload.len()).unwrap_or(0), ); } - RequestBody::Pull(_) => (), } } } @@ -492,14 +484,6 @@ macro_rules! inc_res_stats { if let Some(stats) = $face.stats.as_ref() { use zenoh_buffers::buffer::Buffer; match &$body { - ResponseBody::Put(p) => { - stats.[<$txrx _z_put_msgs>].[](1); - let mut n = p.payload.len(); - if let Some(a) = p.ext_attachment.as_ref() { - n += a.buffer.len(); - } - stats.[<$txrx _z_put_pl_bytes>].[](n); - } ResponseBody::Reply(r) => { stats.[<$txrx _z_reply_msgs>].[](1); let mut n = 0; diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 9f43841025..3e35db14b6 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -24,7 +24,6 @@ use zenoh_config::WhatAmI; #[cfg(feature = "complete_n")] use zenoh_protocol::network::request::ext::TargetType; use zenoh_protocol::network::RequestId; -use zenoh_protocol::zenoh::PushBody; use zenoh_protocol::{ core::{key_expr::keyexpr, ExprId, WireExpr}, network::{ @@ -51,7 +50,6 @@ pub(crate) struct QueryTargetQabl { pub(crate) distance: f64, } pub(crate) type QueryTargetQablSet = Vec; -pub(crate) type PullCaches = Vec>; pub(crate) struct SessionContext { pub(crate) face: Arc, @@ -59,7 +57,6 @@ pub(crate) struct SessionContext { pub(crate) remote_expr_id: Option, pub(crate) subs: Option, pub(crate) qabl: Option, - pub(crate) last_values: HashMap, pub(crate) in_interceptor_cache: Option>, pub(crate) e_interceptor_cache: Option>, } @@ -121,7 +118,6 @@ impl QueryRoutes { pub(crate) struct ResourceContext { pub(crate) matches: Vec>, - pub(crate) matching_pulls: Option>, pub(crate) hat: Box, pub(crate) valid_data_routes: bool, pub(crate) data_routes: DataRoutes, @@ -133,7 +129,6 @@ impl ResourceContext { fn new(hat: Box) -> ResourceContext { ResourceContext { matches: Vec::new(), - matching_pulls: None, hat, valid_data_routes: false, data_routes: DataRoutes::default(), @@ -159,14 +154,6 @@ impl ResourceContext { pub(crate) 
fn disable_query_routes(&mut self) { self.valid_query_routes = false; } - - pub(crate) fn update_matching_pulls(&mut self, pulls: Arc) { - self.matching_pulls = Some(pulls); - } - - pub(crate) fn disable_matching_pulls(&mut self) { - self.matching_pulls = None; - } } pub struct Resource { @@ -445,7 +432,6 @@ impl Resource { remote_expr_id: None, subs: None, qabl: None, - last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }) @@ -708,7 +694,6 @@ pub fn register_expr( remote_expr_id: Some(expr_id), subs: None, qabl: None, - last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }) diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs index e239a316a1..4f2fc2ee83 100644 --- a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -71,7 +71,6 @@ pub struct Tables { pub(crate) mcast_groups: Vec>, pub(crate) mcast_faces: Vec>, pub(crate) interceptors: Vec, - pub(crate) pull_caches_lock: Mutex<()>, pub(crate) hat: Box, pub(crate) hat_code: Arc, // @TODO make this a Box } @@ -103,7 +102,6 @@ impl Tables { mcast_groups: vec![], mcast_faces: vec![], interceptors: interceptor_factories(config)?, - pull_caches_lock: Mutex::new(()), hat: hat_code.new_tables(router_peers_failover_brokering), hat_code: hat_code.into(), }) diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index 05210bcaee..a9908f5f58 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -20,9 +20,7 @@ use crate::{ net::routing::{ dispatcher::face::Face, - router::{ - compute_data_routes, compute_matching_pulls, compute_query_routes, RoutesIndexes, - }, + router::{compute_data_routes, compute_query_routes, RoutesIndexes}, }, runtime::Runtime, }; @@ -192,11 +190,7 @@ impl HatBaseTrait for HatCode { let rtables = zread!(tables.tables); for _match in subs_matches.drain(..) { let mut expr = RoutingExpr::new(&_match, ""); - matches_data_routes.push(( - _match.clone(), - compute_data_routes(&rtables, &mut expr), - compute_matching_pulls(&rtables, &mut expr), - )); + matches_data_routes.push((_match.clone(), compute_data_routes(&rtables, &mut expr))); } for _match in qabls_matches.drain(..) 
{ matches_query_routes.push((_match.clone(), compute_query_routes(&rtables, &_match))); @@ -204,13 +198,10 @@ impl HatBaseTrait for HatCode { drop(rtables); let mut wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { + for (mut res, data_routes) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); Resource::clean(&mut res); } for (mut res, query_routes) in matches_query_routes { diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index f9f827ecc5..290f90f95f 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -30,7 +30,7 @@ use zenoh_protocol::{ core::{Reliability, WhatAmI}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, Mode, UndeclareSubscriber, + DeclareSubscriber, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; @@ -94,16 +94,11 @@ fn register_client_subscription( { let res = get_mut_unchecked(res); match res.session_ctxs.get_mut(&face.id) { - Some(ctx) => match &ctx.subs { - Some(info) => { - if Mode::Pull == info.mode { - get_mut_unchecked(ctx).subs = Some(*sub_info); - } - } - None => { + Some(ctx) => { + if ctx.subs.is_none() { get_mut_unchecked(ctx).subs = Some(*sub_info); } - }, + } None => { res.session_ctxs.insert( face.id, @@ -113,7 +108,6 @@ fn register_client_subscription( remote_expr_id: None, subs: Some(*sub_info), qabl: None, - last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }), @@ -132,10 +126,8 @@ fn declare_client_subscription( sub_info: &SubscriberInfo, ) { register_client_subscription(tables, face, id, res, sub_info); - let mut propa_sub_info = *sub_info; - propa_sub_info.mode = Mode::Push; - propagate_simple_subscription(tables, res, &propa_sub_info, face); + propagate_simple_subscription(tables, res, sub_info, face); // This introduced a buffer overflow on windows // @TODO: Let's deactivate this on windows until Fixed #[cfg(not(windows))] @@ -243,7 +235,6 @@ fn forget_client_subscription( pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers - mode: Mode::Push, }; for src_face in tables .faces @@ -327,20 +318,19 @@ impl HatPubSubTrait for HatCode { let mres = mres.upgrade().unwrap(); for (sid, context) in &mres.session_ctxs { - if let Some(subinfo) = &context.subs { - if match tables.whatami { + if context.subs.is_some() + && match tables.whatami { WhatAmI::Router => context.face.whatami != WhatAmI::Router, _ => { source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client } - } && subinfo.mode == Mode::Push - { - route.entry(*sid).or_insert_with(|| { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - (context.face.clone(), key_expr.to_owned(), NodeId::default()) - }); } + { + route.entry(*sid).or_insert_with(|| { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + (context.face.clone(), key_expr.to_owned(), NodeId::default()) + }); } } } diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 4964a8880a..81e5ba52d9 100644 --- 
a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -22,7 +22,7 @@ use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; use std::borrow::Cow; -use std::collections::{HashMap, HashSet}; +use std::collections::HashSet; use std::sync::atomic::Ordering; use std::sync::Arc; use zenoh_buffers::ZBuf; @@ -133,7 +133,6 @@ fn register_client_queryable( remote_expr_id: None, subs: None, qabl: None, - last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }) diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index 5591ea3b3e..3c4e2091f0 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -36,9 +36,7 @@ use crate::{ routing::{ dispatcher::face::Face, hat::TREES_COMPUTATION_DELAY_MS, - router::{ - compute_data_routes, compute_matching_pulls, compute_query_routes, RoutesIndexes, - }, + router::{compute_data_routes, compute_query_routes, RoutesIndexes}, }, }, runtime::Runtime, @@ -311,11 +309,7 @@ impl HatBaseTrait for HatCode { let rtables = zread!(tables.tables); for _match in subs_matches.drain(..) { let mut expr = RoutingExpr::new(&_match, ""); - matches_data_routes.push(( - _match.clone(), - compute_data_routes(&rtables, &mut expr), - compute_matching_pulls(&rtables, &mut expr), - )); + matches_data_routes.push((_match.clone(), compute_data_routes(&rtables, &mut expr))); } for _match in qabls_matches.drain(..) { matches_query_routes.push((_match.clone(), compute_query_routes(&rtables, &_match))); @@ -323,13 +317,10 @@ impl HatBaseTrait for HatCode { drop(rtables); let mut wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { + for (mut res, data_routes) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); Resource::clean(&mut res); } for (mut res, query_routes) in matches_query_routes { diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index 9a41915333..dddb6ae366 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -33,7 +33,7 @@ use zenoh_protocol::{ core::{Reliability, WhatAmI, ZenohId}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, Mode, UndeclareSubscriber, + DeclareSubscriber, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; @@ -207,16 +207,11 @@ fn register_client_subscription( { let res = get_mut_unchecked(res); match res.session_ctxs.get_mut(&face.id) { - Some(ctx) => match &ctx.subs { - Some(info) => { - if Mode::Pull == info.mode { - get_mut_unchecked(ctx).subs = Some(*sub_info); - } - } - None => { + Some(ctx) => { + if ctx.subs.is_none() { get_mut_unchecked(ctx).subs = Some(*sub_info); } - }, + } None => { res.session_ctxs.insert( face.id, @@ -226,7 +221,6 @@ fn register_client_subscription( remote_expr_id: None, subs: Some(*sub_info), qabl: None, - last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }), @@ -245,10 +239,8 @@ fn declare_client_subscription( sub_info: &SubscriberInfo, ) { register_client_subscription(tables, face, id, res, sub_info); - let 
mut propa_sub_info = *sub_info; - propa_sub_info.mode = Mode::Push; let zid = tables.zid; - register_peer_subscription(tables, face, res, &propa_sub_info, zid); + register_peer_subscription(tables, face, res, sub_info, zid); } #[inline] @@ -454,7 +446,6 @@ fn forget_client_subscription( pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers - mode: Mode::Push, }; if face.whatami == WhatAmI::Client { @@ -511,7 +502,6 @@ pub(super) fn pubsub_tree_change(tables: &mut Tables, new_childs: &[Vec context.face.whatami != WhatAmI::Router, _ => { source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client } - } && subinfo.mode == Mode::Push - { - route.entry(*sid).or_insert_with(|| { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - (context.face.clone(), key_expr.to_owned(), NodeId::default()) - }); } + { + route.entry(*sid).or_insert_with(|| { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + (context.face.clone(), key_expr.to_owned(), NodeId::default()) + }); } } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index 51aac2175a..fa553e5121 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -285,7 +285,6 @@ fn register_client_queryable( remote_expr_id: None, subs: None, qabl: None, - last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }) diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index 1a6c1ba407..59b39d4284 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -23,9 +23,7 @@ use crate::{ protocol::linkstate::LinkStateList, routing::{ dispatcher::face::Face, - router::{ - compute_data_routes, compute_matching_pulls, compute_query_routes, RoutesIndexes, - }, + router::{compute_data_routes, compute_query_routes, RoutesIndexes}, }, }, runtime::Runtime, @@ -241,11 +239,7 @@ impl HatBaseTrait for HatCode { let rtables = zread!(tables.tables); for _match in subs_matches.drain(..) { let mut expr = RoutingExpr::new(&_match, ""); - matches_data_routes.push(( - _match.clone(), - compute_data_routes(&rtables, &mut expr), - compute_matching_pulls(&rtables, &mut expr), - )); + matches_data_routes.push((_match.clone(), compute_data_routes(&rtables, &mut expr))); } for _match in qabls_matches.drain(..) 
{ matches_query_routes.push((_match.clone(), compute_query_routes(&rtables, &_match))); @@ -253,13 +247,10 @@ impl HatBaseTrait for HatCode { drop(rtables); let mut wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { + for (mut res, data_routes) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); Resource::clean(&mut res); } for (mut res, query_routes) in matches_query_routes { diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index 4f6ce5aeca..a722176292 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -30,7 +30,7 @@ use zenoh_protocol::{ core::{Reliability, WhatAmI}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, Mode, UndeclareSubscriber, + DeclareSubscriber, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; @@ -94,16 +94,11 @@ fn register_client_subscription( { let res = get_mut_unchecked(res); match res.session_ctxs.get_mut(&face.id) { - Some(ctx) => match &ctx.subs { - Some(info) => { - if Mode::Pull == info.mode { - get_mut_unchecked(ctx).subs = Some(*sub_info); - } - } - None => { + Some(ctx) => { + if ctx.subs.is_none() { get_mut_unchecked(ctx).subs = Some(*sub_info); } - }, + } None => { res.session_ctxs.insert( face.id, @@ -113,7 +108,6 @@ fn register_client_subscription( remote_expr_id: None, subs: Some(*sub_info), qabl: None, - last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }), @@ -132,10 +126,8 @@ fn declare_client_subscription( sub_info: &SubscriberInfo, ) { register_client_subscription(tables, face, id, res, sub_info); - let mut propa_sub_info = *sub_info; - propa_sub_info.mode = Mode::Push; - propagate_simple_subscription(tables, res, &propa_sub_info, face); + propagate_simple_subscription(tables, res, sub_info, face); // This introduced a buffer overflow on windows // TODO: Let's deactivate this on windows until Fixed #[cfg(not(windows))] @@ -243,7 +235,6 @@ fn forget_client_subscription( pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers - mode: Mode::Push, }; for src_face in tables .faces @@ -327,20 +318,19 @@ impl HatPubSubTrait for HatCode { let mres = mres.upgrade().unwrap(); for (sid, context) in &mres.session_ctxs { - if let Some(subinfo) = &context.subs { - if match tables.whatami { + if context.subs.is_some() + && match tables.whatami { WhatAmI::Router => context.face.whatami != WhatAmI::Router, _ => { source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client } - } && subinfo.mode == Mode::Push - { - route.entry(*sid).or_insert_with(|| { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - (context.face.clone(), key_expr.to_owned(), NodeId::default()) - }); } + { + route.entry(*sid).or_insert_with(|| { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + (context.face.clone(), key_expr.to_owned(), NodeId::default()) + }); } } } diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index 04b31b41ef..caea6fe6b8 100644 --- 
a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -22,7 +22,7 @@ use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; use std::borrow::Cow; -use std::collections::{HashMap, HashSet}; +use std::collections::HashSet; use std::sync::atomic::Ordering; use std::sync::Arc; use zenoh_buffers::ZBuf; @@ -133,7 +133,6 @@ fn register_client_queryable( remote_expr_id: None, subs: None, qabl: None, - last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }) diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index ff576ae271..47cf02db46 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -40,9 +40,7 @@ use crate::{ routing::{ dispatcher::face::Face, hat::TREES_COMPUTATION_DELAY_MS, - router::{ - compute_data_routes, compute_matching_pulls, compute_query_routes, RoutesIndexes, - }, + router::{compute_data_routes, compute_query_routes, RoutesIndexes}, }, }, runtime::Runtime, @@ -480,11 +478,7 @@ impl HatBaseTrait for HatCode { let rtables = zread!(tables.tables); for _match in subs_matches.drain(..) { let mut expr = RoutingExpr::new(&_match, ""); - matches_data_routes.push(( - _match.clone(), - compute_data_routes(&rtables, &mut expr), - compute_matching_pulls(&rtables, &mut expr), - )); + matches_data_routes.push((_match.clone(), compute_data_routes(&rtables, &mut expr))); } for _match in qabls_matches.drain(..) { matches_query_routes.push((_match.clone(), compute_query_routes(&rtables, &_match))); @@ -492,13 +486,10 @@ impl HatBaseTrait for HatCode { drop(rtables); let mut wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { + for (mut res, data_routes) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); Resource::clean(&mut res); } for (mut res, query_routes) in matches_query_routes { diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index da1ca66efd..93c4cb7002 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -33,7 +33,7 @@ use zenoh_protocol::{ core::{Reliability, WhatAmI, ZenohId}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, Mode, UndeclareSubscriber, + DeclareSubscriber, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; @@ -243,8 +243,7 @@ fn declare_peer_subscription( peer: ZenohId, ) { register_peer_subscription(tables, face, res, sub_info, peer); - let mut propa_sub_info = *sub_info; - propa_sub_info.mode = Mode::Push; + let propa_sub_info = *sub_info; let zid = tables.zid; register_router_subscription(tables, face, res, &propa_sub_info, zid); } @@ -260,16 +259,11 @@ fn register_client_subscription( { let res = get_mut_unchecked(res); match res.session_ctxs.get_mut(&face.id) { - Some(ctx) => match &ctx.subs { - Some(info) => { - if Mode::Pull == info.mode { - get_mut_unchecked(ctx).subs = Some(*sub_info); - } - } - None => { + Some(ctx) => { + if ctx.subs.is_none() { get_mut_unchecked(ctx).subs = Some(*sub_info); } - }, + } None => { res.session_ctxs.insert( face.id, @@ -279,7 +273,6 @@ fn register_client_subscription( remote_expr_id: None, subs: 
Some(*sub_info), qabl: None, - last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }), @@ -298,10 +291,8 @@ fn declare_client_subscription( sub_info: &SubscriberInfo, ) { register_client_subscription(tables, face, id, res, sub_info); - let mut propa_sub_info = *sub_info; - propa_sub_info.mode = Mode::Push; let zid = tables.zid; - register_router_subscription(tables, face, res, &propa_sub_info, zid); + register_router_subscription(tables, face, res, sub_info, zid); } #[inline] @@ -600,7 +591,6 @@ fn forget_client_subscription( pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers - mode: Mode::Push, }; if face.whatami == WhatAmI::Client { @@ -720,7 +710,6 @@ pub(super) fn pubsub_tree_change( if *sub == tree_id { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers - mode: Mode::Push, }; send_sourced_subscription_to_net_childs( tables, @@ -799,7 +788,6 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: let key_expr = Resource::decl_key(res, dst_face); let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers - mode: Mode::Push, }; dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -1003,14 +991,11 @@ impl HatPubSubTrait for HatCode { if master || source_type == WhatAmI::Router { for (sid, context) in &mres.session_ctxs { - if let Some(subinfo) = &context.subs { - if context.face.whatami != WhatAmI::Router && subinfo.mode == Mode::Push { - route.entry(*sid).or_insert_with(|| { - let key_expr = - Resource::get_best_key(expr.prefix, expr.suffix, *sid); - (context.face.clone(), key_expr.to_owned(), NodeId::default()) - }); - } + if context.subs.is_some() && context.face.whatami != WhatAmI::Router { + route.entry(*sid).or_insert_with(|| { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + (context.face.clone(), key_expr.to_owned(), NodeId::default()) + }); } } } diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index b76f0adcc6..aca6f71b3e 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -413,7 +413,6 @@ fn register_client_queryable( remote_expr_id: None, subs: None, qabl: None, - last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }) diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index b67692e704..29106cb89d 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -388,58 +388,60 @@ impl Primitives for AdminSpace { fn send_request(&self, msg: Request) { trace!("recv Request {:?}", msg); - if let RequestBody::Query(query) = msg.payload { - let primitives = zlock!(self.primitives).as_ref().unwrap().clone(); - { - let conf = self.context.runtime.state.config.lock(); - if !conf.adminspace.permissions().read { - log::error!( + match msg.payload { + RequestBody::Query(query) => { + let primitives = zlock!(self.primitives).as_ref().unwrap().clone(); + { + let conf = self.context.runtime.state.config.lock(); + if !conf.adminspace.permissions().read { + log::error!( "Received GET on '{}' but 
adminspace.permissions.read=false in configuration", msg.wire_expr ); - primitives.send_response_final(ResponseFinal { - rid: msg.id, - ext_qos: ext::QoSType::RESPONSE_FINAL, - ext_tstamp: None, - }); - return; - } - } - - let key_expr = match self.key_expr_to_string(&msg.wire_expr) { - Ok(key_expr) => key_expr.into_owned(), - Err(e) => { - log::error!("Unknown KeyExpr: {}", e); - primitives.send_response_final(ResponseFinal { - rid: msg.id, - ext_qos: ext::QoSType::RESPONSE_FINAL, - ext_tstamp: None, - }); - return; + primitives.send_response_final(ResponseFinal { + rid: msg.id, + ext_qos: ext::QoSType::RESPONSE_FINAL, + ext_tstamp: None, + }); + return; + } } - }; - - let zid = self.zid; - let parameters = query.parameters.to_owned(); - let query = Query { - inner: Arc::new(QueryInner { - key_expr: key_expr.clone(), - parameters, - value: query - .ext_body - .map(|b| Value::from(b.payload).with_encoding(b.encoding)), - qid: msg.id, - zid, - primitives, - #[cfg(feature = "unstable")] - attachment: query.ext_attachment.map(Into::into), - }), - eid: self.queryable_id, - }; - for (key, handler) in &self.handlers { - if key_expr.intersects(key) { - handler(&self.context, query.clone()); + let key_expr = match self.key_expr_to_string(&msg.wire_expr) { + Ok(key_expr) => key_expr.into_owned(), + Err(e) => { + log::error!("Unknown KeyExpr: {}", e); + primitives.send_response_final(ResponseFinal { + rid: msg.id, + ext_qos: ext::QoSType::RESPONSE_FINAL, + ext_tstamp: None, + }); + return; + } + }; + + let zid = self.zid; + let parameters = query.parameters.to_owned(); + let query = Query { + inner: Arc::new(QueryInner { + key_expr: key_expr.clone(), + parameters, + value: query + .ext_body + .map(|b| Value::from(b.payload).with_encoding(b.encoding)), + qid: msg.id, + zid, + primitives, + #[cfg(feature = "unstable")] + attachment: query.ext_attachment.map(Into::into), + }), + eid: self.queryable_id, + }; + + for (key, handler) in &self.handlers { + if key_expr.intersects(key) { + handler(&self.context, query.clone()); + } } } } diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index 4560eefaae..516bcd0109 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -26,7 +26,6 @@ use zenoh_protocol::core::{ key_expr::keyexpr, ExprId, Reliability, WhatAmI, WireExpr, ZenohId, EMPTY_EXPR_ID, }; use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; -use zenoh_protocol::network::declare::Mode; use zenoh_protocol::network::{ext, Declare, DeclareBody, DeclareKeyExpr}; use zenoh_protocol::zenoh::{PushBody, Put}; @@ -59,7 +58,6 @@ fn base_test() { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, - mode: Mode::Push, }; declare_subscription( @@ -186,7 +184,6 @@ fn multisub_test() { // -------------- let sub_info = SubscriberInfo { reliability: Reliability::Reliable, - mode: Mode::Push, }; declare_subscription( zlock!(tables.ctrl_lock).as_ref(), @@ -305,7 +302,6 @@ fn clean_test() { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, - mode: Mode::Push, }; declare_subscription( @@ -570,7 +566,6 @@ fn client_test() { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, - mode: Mode::Push, }; let primitives0 = Arc::new(ClientPrimitives::new()); diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 5e706a0da8..496c6879ce 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -81,7 +81,7 @@ use zenoh_protocol::{ }, zenoh::{ query::{self, ext::QueryBodyType, Consolidation}, - Pull, 
PushBody, RequestBody, ResponseBody, + PushBody, RequestBody, ResponseBody, }, }; use zenoh_result::ZResult; @@ -294,7 +294,7 @@ impl<'s, 'a> SessionDeclarations<'s, 'a> for SessionRef<'a> { fn declare_subscriber<'b, TryIntoKeyExpr>( &'s self, key_expr: TryIntoKeyExpr, - ) -> SubscriberBuilder<'a, 'b, PushMode, DefaultHandler> + ) -> SubscriberBuilder<'a, 'b, DefaultHandler> where TryIntoKeyExpr: TryInto>, >>::Error: Into, @@ -303,7 +303,6 @@ impl<'s, 'a> SessionDeclarations<'s, 'a> for SessionRef<'a> { session: self.clone(), key_expr: TryIntoKeyExpr::try_into(key_expr).map_err(Into::into), reliability: Reliability::DEFAULT, - mode: PushMode, origin: Locality::default(), handler: DefaultHandler, } @@ -578,7 +577,7 @@ impl<'a> SessionDeclarations<'a, 'a> for Session { fn declare_subscriber<'b, TryIntoKeyExpr>( &'a self, key_expr: TryIntoKeyExpr, - ) -> SubscriberBuilder<'a, 'b, PushMode, DefaultHandler> + ) -> SubscriberBuilder<'a, 'b, DefaultHandler> where TryIntoKeyExpr: TryInto>, >>::Error: Into, @@ -1556,29 +1555,6 @@ impl Session { } } - pub(crate) fn pull<'a>(&'a self, key_expr: &'a KeyExpr) -> impl Resolve> + 'a { - ResolveClosure::new(move || { - trace!("pull({:?})", key_expr); - let state = zread!(self.state); - let primitives = state.primitives.as_ref().unwrap().clone(); - drop(state); - primitives.send_request(Request { - id: 0, // @TODO compute a proper request ID - wire_expr: key_expr.to_wire(self).to_owned(), - ext_qos: ext::QoSType::REQUEST, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - ext_target: request::ext::TargetType::DEFAULT, - ext_budget: None, - ext_timeout: None, - payload: RequestBody::Pull(Pull { - ext_unknown: vec![], - }), - }); - Ok(()) - }) - } - #[allow(clippy::too_many_arguments)] pub(crate) fn query( &self, @@ -1819,7 +1795,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { fn declare_subscriber<'b, TryIntoKeyExpr>( &'s self, key_expr: TryIntoKeyExpr, - ) -> SubscriberBuilder<'static, 'b, PushMode, DefaultHandler> + ) -> SubscriberBuilder<'static, 'b, DefaultHandler> where TryIntoKeyExpr: TryInto>, >>::Error: Into, @@ -1828,7 +1804,6 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { session: SessionRef::Shared(self.clone()), key_expr: key_expr.try_into().map_err(Into::into), reliability: Reliability::DEFAULT, - mode: PushMode, origin: Locality::default(), handler: DefaultHandler, } @@ -2110,20 +2085,12 @@ impl Primitives for Session { #[cfg(feature = "unstable")] m.ext_attachment.map(Into::into), ), - RequestBody::Put(_) => (), - RequestBody::Del(_) => (), - RequestBody::Pull(_) => todo!(), } } fn send_response(&self, msg: Response) { trace!("recv Response {:?}", msg); match msg.payload { - ResponseBody::Put(_) => { - log::warn!( - "Received a ResponseBody::Put, but this isn't supported yet. Dropping message." 
- ) - } ResponseBody::Err(e) => { let mut state = zwrite!(self.state); match state.queries.get_mut(&msg.rid) { @@ -2453,7 +2420,7 @@ pub trait SessionDeclarations<'s, 'a> { fn declare_subscriber<'b, TryIntoKeyExpr>( &'s self, key_expr: TryIntoKeyExpr, - ) -> SubscriberBuilder<'a, 'b, PushMode, DefaultHandler> + ) -> SubscriberBuilder<'a, 'b, DefaultHandler> where TryIntoKeyExpr: TryInto>, >>::Error: Into; diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index 413c9201f2..4488140610 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -24,10 +24,10 @@ use std::fmt; use std::future::Ready; use std::ops::{Deref, DerefMut}; use std::sync::Arc; -use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; +use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; #[cfg(feature = "unstable")] use zenoh_protocol::core::EntityGlobalId; -use zenoh_protocol::network::declare::{subscriber::ext::SubscriberInfo, Mode}; +use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; /// The kind of reliability. pub use zenoh_protocol::core::Reliability; @@ -80,90 +80,6 @@ pub(crate) struct SubscriberInner<'a> { pub(crate) alive: bool, } -/// A [`PullMode`] subscriber that provides data through a callback. -/// -/// CallbackPullSubscribers only provide data when explicitely pulled by the -/// application with the [`pull`](CallbackPullSubscriber::pull) function. -/// CallbackPullSubscribers can be created from a zenoh [`Session`](crate::Session) -/// with the [`declare_subscriber`](crate::SessionDeclarations::declare_subscriber) function, -/// the [`callback`](SubscriberBuilder::callback) function -/// and the [`pull_mode`](SubscriberBuilder::pull_mode) function -/// of the resulting builder. -/// -/// Subscribers are automatically undeclared when dropped. -/// -/// # Examples -/// ``` -/// # async_std::task::block_on(async { -/// use zenoh::prelude::r#async::*; -/// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let subscriber = session -/// .declare_subscriber("key/expression") -/// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()); }) -/// .pull_mode() -/// .res() -/// .await -/// .unwrap(); -/// subscriber.pull(); -/// # }) -/// ``` -pub(crate) struct PullSubscriberInner<'a> { - inner: SubscriberInner<'a>, -} - -impl<'a> PullSubscriberInner<'a> { - /// Pull available data for a [`CallbackPullSubscriber`]. - /// - /// # Examples - /// ``` - /// # async_std::task::block_on(async { - /// use zenoh::prelude::r#async::*; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let subscriber = session - /// .declare_subscriber("key/expression") - /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()); }) - /// .pull_mode() - /// .res() - /// .await - /// .unwrap(); - /// subscriber.pull(); - /// # }) - /// ``` - #[inline] - pub fn pull(&self) -> impl Resolve> + '_ { - self.inner.session.pull(&self.inner.state.key_expr) - } - - /// Close a [`CallbackPullSubscriber`](CallbackPullSubscriber). - /// - /// `CallbackPullSubscribers` are automatically closed when dropped, but you may want to use this function to handle errors or - /// close the `CallbackPullSubscriber` asynchronously. 
- /// - /// # Examples - /// ``` - /// # async_std::task::block_on(async { - /// use zenoh::prelude::r#async::*; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// # fn data_handler(_sample: Sample) { }; - /// let subscriber = session - /// .declare_subscriber("key/expression") - /// .callback(data_handler) - /// .pull_mode() - /// .res() - /// .await - /// .unwrap(); - /// subscriber.undeclare().res().await.unwrap(); - /// # }) - /// ``` - #[inline] - pub fn undeclare(self) -> impl Resolve> + 'a { - Undeclarable::undeclare_inner(self.inner, ()) - } -} - impl<'a> SubscriberInner<'a> { /// Close a [`CallbackSubscriber`](CallbackSubscriber). /// @@ -248,28 +164,6 @@ impl Drop for SubscriberInner<'_> { } } -/// The mode for pull subscribers. -#[non_exhaustive] -#[derive(Debug, Clone, Copy)] -pub struct PullMode; - -impl From for Mode { - fn from(_: PullMode) -> Self { - Mode::Pull - } -} - -/// The mode for push subscribers. -#[non_exhaustive] -#[derive(Debug, Clone, Copy)] -pub struct PushMode; - -impl From for Mode { - fn from(_: PushMode) -> Self { - Mode::Push - } -} - /// A builder for initializing a [`FlumeSubscriber`]. /// /// # Examples @@ -281,7 +175,6 @@ impl From for Mode { /// let subscriber = session /// .declare_subscriber("key/expression") /// .best_effort() -/// .pull_mode() /// .res() /// .await /// .unwrap(); @@ -289,7 +182,7 @@ impl From for Mode { /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] -pub struct SubscriberBuilder<'a, 'b, Mode, Handler> { +pub struct SubscriberBuilder<'a, 'b, Handler> { #[cfg(feature = "unstable")] pub session: SessionRef<'a>, #[cfg(not(feature = "unstable"))] @@ -305,8 +198,6 @@ pub struct SubscriberBuilder<'a, 'b, Mode, Handler> { #[cfg(not(feature = "unstable"))] pub(crate) reliability: Reliability, - #[cfg(feature = "unstable")] - pub mode: Mode, #[cfg(not(feature = "unstable"))] pub(crate) mode: Mode, @@ -321,7 +212,7 @@ pub struct SubscriberBuilder<'a, 'b, Mode, Handler> { pub(crate) handler: Handler, } -impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { +impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> { /// Receive the samples for this subscription with a callback. 
/// /// # Examples @@ -339,7 +230,7 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { /// # }) /// ``` #[inline] - pub fn callback(self, callback: Callback) -> SubscriberBuilder<'a, 'b, Mode, Callback> + pub fn callback(self, callback: Callback) -> SubscriberBuilder<'a, 'b, Callback> where Callback: Fn(Sample) + Send + Sync + 'static, { @@ -347,7 +238,7 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { session, key_expr, reliability, - mode, + origin, handler: _, } = self; @@ -355,7 +246,7 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { session, key_expr, reliability, - mode, + origin, handler: callback, } @@ -385,7 +276,7 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { pub fn callback_mut( self, callback: CallbackMut, - ) -> SubscriberBuilder<'a, 'b, Mode, impl Fn(Sample) + Send + Sync + 'static> + ) -> SubscriberBuilder<'a, 'b, impl Fn(Sample) + Send + Sync + 'static> where CallbackMut: FnMut(Sample) + Send + Sync + 'static, { @@ -412,7 +303,7 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { /// # }) /// ``` #[inline] - pub fn with(self, handler: Handler) -> SubscriberBuilder<'a, 'b, Mode, Handler> + pub fn with(self, handler: Handler) -> SubscriberBuilder<'a, 'b, Handler> where Handler: crate::prelude::IntoHandler<'static, Sample>, { @@ -420,7 +311,6 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { session, key_expr, reliability, - mode, origin, handler: _, } = self; @@ -428,13 +318,13 @@ impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { session, key_expr, reliability, - mode, origin, handler, } } } -impl<'a, 'b, Mode, Handler> SubscriberBuilder<'a, 'b, Mode, Handler> { + +impl<'a, 'b, Handler> SubscriberBuilder<'a, 'b, Handler> { /// Change the subscription reliability. #[inline] pub fn reliability(mut self, reliability: Reliability) -> Self { @@ -464,52 +354,10 @@ impl<'a, 'b, Mode, Handler> SubscriberBuilder<'a, 'b, Mode, Handler> { self.origin = origin; self } - - /// Change the subscription mode to Pull. - #[inline] - pub fn pull_mode(self) -> SubscriberBuilder<'a, 'b, PullMode, Handler> { - let SubscriberBuilder { - session, - key_expr, - reliability, - mode: _, - origin, - handler, - } = self; - SubscriberBuilder { - session, - key_expr, - reliability, - mode: PullMode, - origin, - handler, - } - } - - /// Change the subscription mode to Push. 
- #[inline] - pub fn push_mode(self) -> SubscriberBuilder<'a, 'b, PushMode, Handler> { - let SubscriberBuilder { - session, - key_expr, - reliability, - mode: _, - origin, - handler, - } = self; - SubscriberBuilder { - session, - key_expr, - reliability, - mode: PushMode, - origin, - handler, - } - } } // Push mode -impl<'a, Handler> Resolvable for SubscriberBuilder<'a, '_, PushMode, Handler> +impl<'a, Handler> Resolvable for SubscriberBuilder<'a, '_, Handler> where Handler: IntoHandler<'static, Sample> + Send, Handler::Handler: Send, @@ -517,7 +365,7 @@ where type To = ZResult>; } -impl<'a, Handler> SyncResolve for SubscriberBuilder<'a, '_, PushMode, Handler> +impl<'a, Handler> SyncResolve for SubscriberBuilder<'a, '_, Handler> where Handler: IntoHandler<'static, Sample> + Send, Handler::Handler: Send, @@ -534,7 +382,6 @@ where callback, &SubscriberInfo { reliability: self.reliability, - mode: self.mode.into(), }, ) .map(|sub_state| Subscriber { @@ -548,61 +395,7 @@ where } } -impl<'a, Handler> AsyncResolve for SubscriberBuilder<'a, '_, PushMode, Handler> -where - Handler: IntoHandler<'static, Sample> + Send, - Handler::Handler: Send, -{ - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - -// Pull mode -impl<'a, Handler> Resolvable for SubscriberBuilder<'a, '_, PullMode, Handler> -where - Handler: IntoHandler<'static, Sample> + Send, - Handler::Handler: Send, -{ - type To = ZResult>; -} - -impl<'a, Handler> SyncResolve for SubscriberBuilder<'a, '_, PullMode, Handler> -where - Handler: IntoHandler<'static, Sample> + Send, - Handler::Handler: Send, -{ - fn res_sync(self) -> ::To { - let key_expr = self.key_expr?; - let session = self.session; - let (callback, receiver) = self.handler.into_handler(); - session - .declare_subscriber_inner( - &key_expr, - &None, - self.origin, - callback, - &SubscriberInfo { - reliability: self.reliability, - mode: self.mode.into(), - }, - ) - .map(|sub_state| PullSubscriber { - subscriber: PullSubscriberInner { - inner: SubscriberInner { - session, - state: sub_state, - alive: true, - }, - }, - receiver, - }) - } -} - -impl<'a, Handler> AsyncResolve for SubscriberBuilder<'a, '_, PullMode, Handler> +impl<'a, Handler> AsyncResolve for SubscriberBuilder<'a, '_, Handler> where Handler: IntoHandler<'static, Sample> + Send, Handler::Handler: Send, @@ -647,102 +440,6 @@ pub struct Subscriber<'a, Receiver> { pub receiver: Receiver, } -/// A [`PullMode`] subscriber that provides data through a [`Handler`](crate::prelude::IntoHandler). -/// -/// PullSubscribers only provide data when explicitely pulled by the -/// application with the [`pull`](PullSubscriber::pull) function. -/// PullSubscribers can be created from a zenoh [`Session`](crate::Session) -/// with the [`declare_subscriber`](crate::SessionDeclarations::declare_subscriber) function, -/// the [`with`](SubscriberBuilder::with) function -/// and the [`pull_mode`](SubscriberBuilder::pull_mode) function -/// of the resulting builder. -/// -/// Subscribers are automatically undeclared when dropped. 
-/// -/// # Examples -/// ``` -/// # async_std::task::block_on(async { -/// use zenoh::prelude::r#async::*; -/// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let subscriber = session -/// .declare_subscriber("key/expression") -/// .with(flume::bounded(32)) -/// .pull_mode() -/// .res() -/// .await -/// .unwrap(); -/// subscriber.pull(); -/// # }) -/// ``` -#[non_exhaustive] -pub struct PullSubscriber<'a, Receiver> { - pub(crate) subscriber: PullSubscriberInner<'a>, - pub receiver: Receiver, -} - -impl<'a, Receiver> Deref for PullSubscriber<'a, Receiver> { - type Target = Receiver; - fn deref(&self) -> &Self::Target { - &self.receiver - } -} - -impl<'a, Receiver> DerefMut for PullSubscriber<'a, Receiver> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.receiver - } -} - -impl<'a, Receiver> PullSubscriber<'a, Receiver> { - /// Pull available data for a [`PullSubscriber`]. - /// - /// # Examples - /// ``` - /// # async_std::task::block_on(async { - /// use zenoh::prelude::r#async::*; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let subscriber = session - /// .declare_subscriber("key/expression") - /// .with(flume::bounded(32)) - /// .pull_mode() - /// .res() - /// .await - /// .unwrap(); - /// subscriber.pull(); - /// # }) - /// ``` - #[inline] - pub fn pull(&self) -> impl Resolve> + '_ { - self.subscriber.pull() - } - - /// Close a [`PullSubscriber`]. - /// - /// Subscribers are automatically closed when dropped, but you may want to use this function to handle errors or - /// close the Subscriber asynchronously. - /// - /// # Examples - /// ``` - /// # async_std::task::block_on(async { - /// use zenoh::prelude::r#async::*; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let subscriber = session.declare_subscriber("key/expression") - /// .pull_mode() - /// .res() - /// .await - /// .unwrap(); - /// subscriber.undeclare().res().await.unwrap(); - /// # }) - /// ``` - #[inline] - pub fn undeclare(self) -> impl Resolve> + 'a { - self.subscriber.undeclare() - } -} - impl<'a, Receiver> Subscriber<'a, Receiver> { /// Returns the [`EntityGlobalId`] of this Subscriber. /// From 5b18594a5084bc2f98fc14ee52078dffa2328eec Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 19 Mar 2024 14:58:46 +0100 Subject: [PATCH 024/357] replaced sample new to put/delete --- .../src/replica/aligner.rs | 2 +- .../src/replica/storage.rs | 250 ++++++++++-------- zenoh/src/sample.rs | 70 +++-- zenoh/src/session.rs | 6 +- 4 files changed, 192 insertions(+), 136 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index fb46b78082..0df648409d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -108,7 +108,7 @@ impl Aligner { let Value { payload, encoding, .. 
} = value; - let sample = Sample::new(key, payload) + let sample = Sample::put(key, payload) .with_encoding(encoding) .with_timestamp(ts); log::debug!("[ALIGNER] Adding {:?} to storage", sample); diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 0708dcabd9..9e4ae7ad0e 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -19,12 +19,12 @@ use async_trait::async_trait; use flume::{Receiver, Sender}; use futures::select; use std::collections::{HashMap, HashSet}; -use std::str::{self, FromStr}; +use std::str::FromStr; use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::ZBuf; use zenoh::prelude::r#async::*; use zenoh::query::ConsolidationMode; -use zenoh::time::{Timestamp, NTP64}; +use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::{Result as ZResult, Session}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; @@ -39,9 +39,102 @@ pub const WILDCARD_UPDATES_FILENAME: &str = "wildcard_updates"; pub const TOMBSTONE_FILENAME: &str = "tombstones"; #[derive(Clone)] -struct Update { - kind: SampleKind, - data: StoredData, +enum Update { + Put(StoredData), + Delete(Timestamp), +} + +impl From for Update { + fn from(sample: Sample) -> Self { + let mut sample = sample; + let timestamp = *sample.ensure_timestamp(); + match sample.kind() { + SampleKind::Put => Update::Put(StoredData { + value: Value::from(sample), + timestamp, + }), + SampleKind::Delete => Update::Delete(timestamp), + } + } +} + +impl Update { + fn timestamp(&self) -> &Timestamp { + match self { + Update::Put(data) => &data.timestamp, + Update::Delete(ts) => ts, + } + } +} + +// implement from String for Update +impl TryFrom for Update { + type Error = zenoh::Error; + + fn try_from(value: String) -> Result { + let result: (String, String, String, Vec<&[u8]>) = serde_json::from_str(&value)?; + let mut payload = ZBuf::default(); + for slice in result.3 { + payload.push_zslice(slice.to_vec().into()); + } + let value = Value::new(payload).with_encoding(result.2); + let timestamp = Timestamp::from_str(&result.1).map_err(|_|"Error parsing timestamp")?; + if result .0.eq(&(SampleKind::Put).to_string()) { + Ok(Update::Put(StoredData { value, timestamp })) + } else { + Ok(Update::Delete(timestamp)) + } + } +} + +// implement to_string for Update +impl ToString for Update { + fn to_string(&self) -> String { + let result = match self { + Update::Put(data) => ( + SampleKind::Put.to_string(), + data.timestamp.to_string(), + data.value.encoding.to_string(), + data.value.payload.slices().collect::>(), + ), + Update::Delete(ts) => ( + SampleKind::Delete.to_string(), + ts.to_string(), + "".to_string(), + vec![], + ), + }; + serde_json::to_string_pretty(&result).unwrap() + } +} + +trait IntoSample { + fn into_sample(self, key_expr: IntoKeyExpr) -> Sample + where + IntoKeyExpr: Into>; +} + +impl IntoSample for StoredData { + fn into_sample(self, key_expr: IntoKeyExpr) -> Sample + where + IntoKeyExpr: Into>, + { + Sample::put(key_expr, self.value.payload) + .with_encoding(self.value.encoding) + .with_timestamp(self.timestamp) + } +} + +impl IntoSample for Update { + fn into_sample(self, key_expr: IntoKeyExpr) -> Sample + where + IntoKeyExpr: Into>, + { + match self { + Update::Put(data) => data.into_sample(key_expr), + 
Update::Delete(ts) => Sample::delete(key_expr).with_timestamp(ts), + } + } } pub struct ReplicationService { @@ -109,10 +202,10 @@ impl StorageService { let saved_wc = std::fs::read_to_string(zenoh_home().join(WILDCARD_UPDATES_FILENAME)).unwrap(); let saved_wc: HashMap = - serde_json::from_str(&saved_wc).unwrap(); + serde_json::from_str(&saved_wc).unwrap(); // TODO: Remove unwrap let mut wildcard_updates = storage_service.wildcard_updates.write().await; for (k, data) in saved_wc { - wildcard_updates.insert(&k, construct_update(data)); + wildcard_updates.insert(&k, Update::try_from(data).unwrap()); // TODO: Remove unwrap } } } @@ -269,6 +362,7 @@ impl StorageService { } else { sample }; + let sample_timestamp = *sample.timestamp().unwrap_or(&new_reception_timestamp()); // if wildcard, update wildcard_updates if sample.key_expr().is_wild() { @@ -297,28 +391,17 @@ impl StorageService { log::trace!( "Sample `{:?}` identified as neded processing for key {}", sample, - k + &k ); // there might be the case that the actual update was outdated due to a wild card update, but not stored yet in the storage. // get the relevant wild card entry and use that value and timestamp to update the storage - let sample_to_store = match self - .ovderriding_wild_update(&k, sample.timestamp().unwrap()) - .await - { - Some(overriding_update) => { - let Value { - payload, encoding, .. - } = overriding_update.data.value; - Sample::new(KeyExpr::from(k.clone()), payload) - .with_encoding(encoding) - .with_timestamp(overriding_update.data.timestamp) - .with_kind(overriding_update.kind) - } - None => Sample::new(KeyExpr::from(k.clone()), sample.payload().clone()) - .with_encoding(sample.encoding().clone()) - .with_timestamp(*sample.timestamp().unwrap()) - .with_kind(sample.kind()), - }; + let sample_to_store = + match self.ovderriding_wild_update(&k, &sample_timestamp).await { + Some(overriding_update) => overriding_update.into_sample(k.clone()), + + None => sample.clone(), + }; + let timestamp = sample_to_store.timestamp().unwrap_or(&sample_timestamp); let stripped_key = match self.strip_prefix(sample_to_store.key_expr()) { Ok(stripped) => stripped, @@ -341,11 +424,8 @@ impl StorageService { } SampleKind::Delete => { // register a tombstone - self.mark_tombstone(&k, *sample_to_store.timestamp().unwrap()) - .await; - storage - .delete(stripped_key, *sample_to_store.timestamp().unwrap()) - .await + self.mark_tombstone(&k, *timestamp).await; + storage.delete(stripped_key, *timestamp).await } }; drop(storage); @@ -393,22 +473,12 @@ impl StorageService { // @TODO: change into a better store that does incremental writes let key = sample.key_expr().clone(); let mut wildcards = self.wildcard_updates.write().await; - let timestamp = *sample.timestamp().unwrap(); - wildcards.insert( - &key, - Update { - kind: sample.kind(), - data: StoredData { - value: Value::from(sample), - timestamp, - }, - }, - ); + wildcards.insert(&key, sample.into()); if self.capability.persistence.eq(&Persistence::Durable) { // flush to disk to makeit durable let mut serialized_data = HashMap::new(); for (k, update) in wildcards.key_value_pairs() { - serialized_data.insert(k, serialize_update(update)); + serialized_data.insert(k, update.to_string()); } if let Err(e) = std::fs::write( zenoh_home().join(WILDCARD_UPDATES_FILENAME), @@ -437,34 +507,36 @@ impl StorageService { let mut update = None; for node in wildcards.intersecting_keys(key_expr) { let weight = wildcards.weight_at(&node); - if weight.is_some() && weight.unwrap().data.timestamp > *ts { - // 
if the key matches a wild card update, check whether it was saved in storage - // remember that wild card updates change only existing keys - let stripped_key = match self.strip_prefix(&key_expr.into()) { - Ok(stripped) => stripped, - Err(e) => { - log::error!("{}", e); - break; - } - }; - let mut storage = self.storage.lock().await; - match storage.get(stripped_key, "").await { - Ok(stored_data) => { - for entry in stored_data { - if entry.timestamp > *ts { - return None; + if let Some(weight) = weight { + if weight.timestamp() > ts { + // if the key matches a wild card update, check whether it was saved in storage + // remember that wild card updates change only existing keys + let stripped_key = match self.strip_prefix(&key_expr.into()) { + Ok(stripped) => stripped, + Err(e) => { + log::error!("{}", e); + break; + } + }; + let mut storage = self.storage.lock().await; + match storage.get(stripped_key, "").await { + Ok(stored_data) => { + for entry in stored_data { + if entry.timestamp > *ts { + return None; + } } } - } - Err(e) => { - log::warn!( - "Storage '{}' raised an error fetching a query on key {} : {}", - self.name, - key_expr, - e - ); - ts = &weight.unwrap().data.timestamp; - update = Some(weight.unwrap().clone()); + Err(e) => { + log::warn!( + "Storage '{}' raised an error fetching a query on key {} : {}", + self.name, + key_expr, + e + ); + ts = weight.timestamp(); + update = Some(weight.clone()); + } } } } @@ -517,12 +589,7 @@ impl StorageService { match storage.get(stripped_key, q.parameters()).await { Ok(stored_data) => { for entry in stored_data { - let Value { - payload, encoding, .. - } = entry.value; - let sample = Sample::new(key.clone(), payload) - .with_encoding(encoding) - .with_timestamp(entry.timestamp); + let sample = entry.into_sample(key.clone()); // apply outgoing interceptor on results let sample = if let Some(ref interceptor) = self.out_interceptor { interceptor(sample) @@ -558,7 +625,7 @@ impl StorageService { let Value { payload, encoding, .. 
} = entry.value; - let sample = Sample::new(q.key_expr().clone(), payload) + let sample = Sample::put(q.key_expr().clone(), payload) .with_encoding(encoding) .with_timestamp(entry.timestamp); // apply outgoing interceptor on results @@ -687,35 +754,6 @@ impl StorageService { } } -fn serialize_update(update: &Update) -> String { - let result = ( - update.kind.to_string(), - update.data.timestamp.to_string(), - update.data.value.encoding.to_string(), - update.data.value.payload.slices().collect::>(), - ); - serde_json::to_string_pretty(&result).unwrap() -} - -fn construct_update(data: String) -> Update { - let result: (String, String, String, Vec<&[u8]>) = serde_json::from_str(&data).unwrap(); // @TODO: remove the unwrap() - let mut payload = ZBuf::default(); - for slice in result.3 { - payload.push_zslice(slice.to_vec().into()); - } - let value = Value::new(payload).with_encoding(result.2); - let data = StoredData { - value, - timestamp: Timestamp::from_str(&result.1).unwrap(), // @TODO: remove the unwrap() - }; - let kind = if result.0.eq(&(SampleKind::Put).to_string()) { - SampleKind::Put - } else { - SampleKind::Delete - }; - Update { kind, data } -} - // Periodic event cleaning-up data info for old metadata struct GarbageCollectionEvent { config: GarbageCollectionConfig, @@ -747,7 +785,7 @@ impl Timed for GarbageCollectionEvent { let mut to_be_removed = HashSet::new(); for (k, update) in wildcard_updates.key_value_pairs() { - let ts = update.data.timestamp; + let ts = update.timestamp(); if ts.get_time() < &time_limit { // mark key to be removed to_be_removed.insert(k); diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index e294fd2c0c..757d65afd8 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -378,9 +378,9 @@ pub struct Sample { } impl Sample { - /// Creates a new Sample. + /// Creates a "put" Sample. #[inline] - pub fn new(key_expr: IntoKeyExpr, payload: IntoPayload) -> Self + pub fn put(key_expr: IntoKeyExpr, payload: IntoPayload) -> Self where IntoKeyExpr: Into>, IntoPayload: Into, @@ -389,7 +389,7 @@ impl Sample { key_expr: key_expr.into(), payload: payload.into(), encoding: Encoding::default(), - kind: SampleKind::default(), + kind: SampleKind::Put, timestamp: None, qos: QoS::default(), #[cfg(feature = "unstable")] @@ -398,29 +398,55 @@ impl Sample { attachment: None, } } - /// Creates a new Sample. + + /// Creates a "delete" Sample. 
#[inline] - pub fn try_from( - key_expr: TryIntoKeyExpr, - payload: IntoPayload, - ) -> Result + pub fn delete(key_expr: IntoKeyExpr) -> Self where - TryIntoKeyExpr: TryInto>, - >>::Error: Into, - IntoPayload: Into, + IntoKeyExpr: Into>, { - Ok(Sample { - key_expr: key_expr.try_into().map_err(Into::into)?, - payload: payload.into(), + Sample { + key_expr: key_expr.into(), + payload: Payload::empty(), encoding: Encoding::default(), - kind: SampleKind::default(), + kind: SampleKind::Delete, timestamp: None, qos: QoS::default(), #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), #[cfg(feature = "unstable")] attachment: None, - }) + } + } + + /// Attempts to create a "put" Sample + #[inline] + pub fn try_put( + key_expr: TryIntoKeyExpr, + payload: TryIntoPayload, + ) -> Result + where + TryIntoKeyExpr: TryInto>, + >>::Error: Into, + TryIntoPayload: TryInto, + >::Error: Into, + { + let key_expr: KeyExpr<'static> = key_expr.try_into().map_err(Into::into)?; + let payload: Payload = payload.try_into().map_err(Into::into)?; + Ok(Self::put(key_expr, payload)) + } + + /// Attempts to create a "delete" Sample + #[inline] + pub fn try_delete( + key_expr: TryIntoKeyExpr, + ) -> Result + where + TryIntoKeyExpr: TryInto>, + >>::Error: Into, + { + let key_expr: KeyExpr<'static> = key_expr.try_into().map_err(Into::into)?; + Ok(Self::delete(key_expr)) } /// Creates a new Sample with optional data info. @@ -444,9 +470,10 @@ impl Sample { self } - /// Sets the encoding of this Sample. + /// Sets the encoding of this Sample #[inline] pub fn with_encoding(mut self, encoding: Encoding) -> Self { + assert!(self.kind == SampleKind::Put, "Cannot set encoding on a delete sample"); self.encoding = encoding; self } @@ -469,15 +496,6 @@ impl Sample { self.kind } - /// Sets the kind of this Sample. 
- #[inline] - #[doc(hidden)] - #[zenoh_macros::unstable] - pub fn with_kind(mut self, kind: SampleKind) -> Self { - self.kind = kind; - self - } - /// Gets the encoding of this sample #[inline] pub fn encoding(&self) -> &Encoding { diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 5e706a0da8..93d1e2fb9d 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -1538,7 +1538,7 @@ impl Session { let zenoh_collections::single_or_vec::IntoIter { drain, last } = callbacks.into_iter(); for (cb, key_expr) in drain { #[allow(unused_mut)] - let mut sample = Sample::new(key_expr, payload.clone()).with_info(info.clone()); + let mut sample = Sample::put(key_expr, payload.clone()).with_info(info.clone()); #[cfg(feature = "unstable")] { sample.attachment = attachment.clone(); @@ -1547,7 +1547,7 @@ impl Session { } if let Some((cb, key_expr)) = last { #[allow(unused_mut)] - let mut sample = Sample::new(key_expr, payload).with_info(info); + let mut sample = Sample::put(key_expr, payload).with_info(info); #[cfg(feature = "unstable")] { sample.attachment = attachment; @@ -2257,7 +2257,7 @@ impl Primitives for Session { #[allow(unused_mut)] let mut sample = - Sample::new(key_expr.into_owned(), payload).with_info(Some(info)); + Sample::put(key_expr.into_owned(), payload).with_info(Some(info)); #[cfg(feature = "unstable")] { sample.attachment = attachment; From 1038beb92c438d477215813eaed9c173d9785f94 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 19 Mar 2024 16:14:06 +0100 Subject: [PATCH 025/357] interceptors removed --- .../src/replica/storage.rs | 22 ------------------- 1 file changed, 22 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 9e4ae7ad0e..ed7c6a1d9c 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -153,8 +153,6 @@ pub struct StorageService { capability: Capability, tombstones: Arc>>, wildcard_updates: Arc>>, - in_interceptor: Option Sample + Send + Sync>>, - out_interceptor: Option Sample + Send + Sync>>, replication: Option, } @@ -178,8 +176,6 @@ impl StorageService { capability: store_intercept.capability, tombstones: Arc::new(RwLock::new(KeBoxTree::default())), wildcard_updates: Arc::new(RwLock::new(KeBoxTree::default())), - in_interceptor: store_intercept.in_interceptor, - out_interceptor: store_intercept.out_interceptor, replication, }; if storage_service @@ -356,12 +352,6 @@ impl StorageService { // the trimming during PUT and GET should be handled by the plugin async fn process_sample(&self, sample: Sample) { log::trace!("[STORAGE] Processing sample: {:?}", sample); - // Call incoming data interceptor (if any) - let sample = if let Some(ref interceptor) = self.in_interceptor { - interceptor(sample) - } else { - sample - }; let sample_timestamp = *sample.timestamp().unwrap_or(&new_reception_timestamp()); // if wildcard, update wildcard_updates @@ -590,12 +580,6 @@ impl StorageService { Ok(stored_data) => { for entry in stored_data { let sample = entry.into_sample(key.clone()); - // apply outgoing interceptor on results - let sample = if let Some(ref interceptor) = self.out_interceptor { - interceptor(sample) - } else { - sample - }; if let Err(e) = q.reply_sample(sample).res().await { log::warn!( "Storage '{}' raised an error replying a query: {}", @@ -628,12 +612,6 @@ impl StorageService { let sample = Sample::put(q.key_expr().clone(), payload) 
.with_encoding(encoding) .with_timestamp(entry.timestamp); - // apply outgoing interceptor on results - let sample = if let Some(ref interceptor) = self.out_interceptor { - interceptor(sample) - } else { - sample - }; if let Err(e) = q.reply_sample(sample).res().await { log::warn!( "Storage '{}' raised an error replying a query: {}", From 09a84b34ad9be1fa39b4f0dd268d722d6a211f72 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 19 Mar 2024 16:29:14 +0100 Subject: [PATCH 026/357] interceptors removed --- plugins/zenoh-backend-example/src/lib.rs | 13 ++---------- plugins/zenoh-backend-traits/src/lib.rs | 8 -------- .../src/backends_mgt.rs | 7 ------- .../zenoh-plugin-storage-manager/src/lib.rs | 4 ---- .../src/memory_backend/mod.rs | 20 ------------------- 5 files changed, 2 insertions(+), 50 deletions(-) diff --git a/plugins/zenoh-backend-example/src/lib.rs b/plugins/zenoh-backend-example/src/lib.rs index 602d29f375..f81231a498 100644 --- a/plugins/zenoh-backend-example/src/lib.rs +++ b/plugins/zenoh-backend-example/src/lib.rs @@ -13,11 +13,8 @@ // use async_std::sync::RwLock; use async_trait::async_trait; -use std::{ - collections::{hash_map::Entry, HashMap}, - sync::Arc, -}; -use zenoh::{prelude::OwnedKeyExpr, sample::Sample, time::Timestamp, value::Value}; +use std::collections::{hash_map::Entry, HashMap}; +use zenoh::{prelude::OwnedKeyExpr, time::Timestamp, value::Value}; use zenoh_backend_traits::{ config::{StorageConfig, VolumeConfig}, Capability, History, Persistence, Storage, StorageInsertionResult, StoredData, Volume, @@ -71,12 +68,6 @@ impl Volume for ExampleBackend { async fn create_storage(&self, _props: StorageConfig) -> ZResult> { Ok(Box::::default()) } - fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>> { - None - } - fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>> { - None - } } #[async_trait] diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index d17e6dfd77..91e030d361 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -210,14 +210,6 @@ pub trait Volume: Send + Sync { /// Creates a storage configured with some properties. async fn create_storage(&self, props: StorageConfig) -> ZResult>; - - /// Returns an interceptor that will be called before pushing any data - /// into a storage created by this backend. `None` can be returned for no interception point. - fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>>; - - /// Returns an interceptor that will be called before sending any reply - /// to a query from a storage created by this backend. `None` can be returned for no interception point. 
- fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>>; } pub type VolumeInstance = Box; diff --git a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs index aa7260e868..90a6ae6250 100644 --- a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs @@ -14,7 +14,6 @@ use super::storages_mgt::*; use flume::Sender; use std::sync::Arc; -use zenoh::prelude::r#async::*; use zenoh::Session; use zenoh_backend_traits::config::StorageConfig; use zenoh_backend_traits::{Capability, VolumeInstance}; @@ -23,16 +22,12 @@ use zenoh_result::ZResult; pub struct StoreIntercept { pub storage: Box, pub capability: Capability, - pub in_interceptor: Option Sample + Send + Sync>>, - pub out_interceptor: Option Sample + Send + Sync>>, } pub(crate) async fn create_and_start_storage( admin_key: String, config: StorageConfig, backend: &VolumeInstance, - in_interceptor: Option Sample + Send + Sync>>, - out_interceptor: Option Sample + Send + Sync>>, zenoh: Arc, ) -> ZResult> { log::trace!("Create storage '{}'", &admin_key); @@ -41,8 +36,6 @@ pub(crate) async fn create_and_start_storage( let store_intercept = StoreIntercept { storage, capability, - in_interceptor, - out_interceptor, }; start_storage(store_intercept, config, admin_key, zenoh).await diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 0db30bbd6a..91df2f108d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -239,14 +239,10 @@ impl StorageRuntimeInner { volume_id, backend.name() ); - let in_interceptor = backend.instance().incoming_data_interceptor(); - let out_interceptor = backend.instance().outgoing_data_interceptor(); let stopper = async_std::task::block_on(create_and_start_storage( admin_key, storage.clone(), backend.instance(), - in_interceptor, - out_interceptor, self.session.clone(), ))?; self.storages diff --git a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs index ebb4922c9d..4e333b8592 100644 --- a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs @@ -61,26 +61,6 @@ impl Volume for MemoryBackend { log::debug!("Create Memory Storage with configuration: {:?}", properties); Ok(Box::new(MemoryStorage::new(properties).await?)) } - - fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>> { - // By default: no interception point - None - // To test interceptors, uncomment this line: - // Some(Arc::new(|sample| { - // trace!(">>>> IN INTERCEPTOR FOR {:?}", sample); - // sample - // })) - } - - fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>> { - // By default: no interception point - None - // To test interceptors, uncomment this line: - // Some(Arc::new(|sample| { - // trace!("<<<< OUT INTERCEPTOR FOR {:?}", sample); - // sample - // })) - } } impl Drop for MemoryBackend { From 886c37c1922b7882fa3c670f5ad71b1662857729 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 19 Mar 2024 17:13:02 +0100 Subject: [PATCH 027/357] storage sample added --- .../src/replica/storage.rs | 144 +++++++++++------- 1 file changed, 91 insertions(+), 53 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs 
index ed7c6a1d9c..41a456e344 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -27,7 +27,7 @@ use zenoh::query::ConsolidationMode; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::{Result as ZResult, Session}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; -use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; +use zenoh_backend_traits::{Capability, History, Persistence, Storage, StorageInsertionResult, StoredData}; use zenoh_keyexpr::key_expr::OwnedKeyExpr; use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; use zenoh_keyexpr::keyexpr_tree::{support::NonWild, support::UnknownWildness, KeBoxTree}; @@ -38,22 +38,52 @@ use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer}; pub const WILDCARD_UPDATES_FILENAME: &str = "wildcard_updates"; pub const TOMBSTONE_FILENAME: &str = "tombstones"; +#[derive(Clone, Debug)] +enum StorageSampleKind { + Put(Value), + Delete, +} + +#[derive(Clone, Debug)] +struct StorageSample { + pub key_expr: KeyExpr<'static>, + pub timestamp: Timestamp, + pub kind: StorageSampleKind, +} + +impl From for StorageSample { + fn from(sample: Sample) -> Self { + let timestamp = *sample.timestamp().unwrap_or(&new_reception_timestamp()); + // TODO: add API for disassembly of Sample + let key_expr = sample.key_expr().clone(); + let payload = sample.payload().clone(); + let encoding = sample.encoding().clone(); + let kind = match sample.kind() { + SampleKind::Put => StorageSampleKind::Put(Value::new(payload).with_encoding(encoding)), + SampleKind::Delete => StorageSampleKind::Delete, + }; + StorageSample { + key_expr, + timestamp, + kind, + } + } +} + #[derive(Clone)] enum Update { Put(StoredData), Delete(Timestamp), } -impl From for Update { - fn from(sample: Sample) -> Self { - let mut sample = sample; - let timestamp = *sample.ensure_timestamp(); - match sample.kind() { - SampleKind::Put => Update::Put(StoredData { - value: Value::from(sample), - timestamp, +impl From for Update { + fn from(value: StorageSample) -> Self { + match value.kind { + StorageSampleKind::Put(data) => Update::Put(StoredData { + value: data, + timestamp: value.timestamp, }), - SampleKind::Delete => Update::Delete(timestamp), + StorageSampleKind::Delete => Update::Delete(value.timestamp), } } } @@ -78,8 +108,8 @@ impl TryFrom for Update { payload.push_zslice(slice.to_vec().into()); } let value = Value::new(payload).with_encoding(result.2); - let timestamp = Timestamp::from_str(&result.1).map_err(|_|"Error parsing timestamp")?; - if result .0.eq(&(SampleKind::Put).to_string()) { + let timestamp = Timestamp::from_str(&result.1).map_err(|_| "Error parsing timestamp")?; + if result.0.eq(&(SampleKind::Put).to_string()) { Ok(Update::Put(StoredData { value, timestamp })) } else { Ok(Update::Delete(timestamp)) @@ -90,7 +120,7 @@ impl TryFrom for Update { // implement to_string for Update impl ToString for Update { fn to_string(&self) -> String { - let result = match self { + let result = match self { Update::Put(data) => ( SampleKind::Put.to_string(), data.timestamp.to_string(), @@ -108,31 +138,41 @@ impl ToString for Update { } } -trait IntoSample { - fn into_sample(self, key_expr: IntoKeyExpr) -> Sample +trait IntoStorageSample { + fn into_sample(self, key_expr: IntoKeyExpr) -> StorageSample where IntoKeyExpr: Into>; } -impl IntoSample for StoredData { - fn into_sample(self, key_expr: IntoKeyExpr) -> Sample +impl 
IntoStorageSample for StoredData { + fn into_sample(self, key_expr: IntoKeyExpr) -> StorageSample where IntoKeyExpr: Into>, { - Sample::put(key_expr, self.value.payload) - .with_encoding(self.value.encoding) - .with_timestamp(self.timestamp) + StorageSample { + key_expr: key_expr.into(), + timestamp: self.timestamp, + kind: StorageSampleKind::Put(self.value), + } } } -impl IntoSample for Update { - fn into_sample(self, key_expr: IntoKeyExpr) -> Sample +impl IntoStorageSample for Update { + fn into_sample(self, key_expr: IntoKeyExpr) -> StorageSample where IntoKeyExpr: Into>, { match self { - Update::Put(data) => data.into_sample(key_expr), - Update::Delete(ts) => Sample::delete(key_expr).with_timestamp(ts), + Update::Put(data) => StorageSample { + key_expr: key_expr.into(), + timestamp: data.timestamp, + kind: StorageSampleKind::Put(data.value), + }, + Update::Delete(ts) => StorageSample { + key_expr: key_expr.into(), + timestamp: ts, + kind: StorageSampleKind::Delete, + }, } } } @@ -201,7 +241,8 @@ impl StorageService { serde_json::from_str(&saved_wc).unwrap(); // TODO: Remove unwrap let mut wildcard_updates = storage_service.wildcard_updates.write().await; for (k, data) in saved_wc { - wildcard_updates.insert(&k, Update::try_from(data).unwrap()); // TODO: Remove unwrap + wildcard_updates.insert(&k, Update::try_from(data).unwrap()); + // TODO: Remove unwrap } } } @@ -272,7 +313,7 @@ impl StorageService { log::error!("Sample {:?} is not timestamped. Please timestamp samples meant for replicated storage.", sample); } else { - self.process_sample(sample).await; + self.process_sample(sample.into()).await; } }, // on query on key_expr @@ -350,33 +391,32 @@ impl StorageService { // The storage should only simply save the key, sample pair while put and retrieve the same during get // the trimming during PUT and GET should be handled by the plugin - async fn process_sample(&self, sample: Sample) { + async fn process_sample(&self, sample: StorageSample) { log::trace!("[STORAGE] Processing sample: {:?}", sample); - let sample_timestamp = *sample.timestamp().unwrap_or(&new_reception_timestamp()); // if wildcard, update wildcard_updates - if sample.key_expr().is_wild() { + if sample.key_expr.is_wild() { self.register_wildcard_update(sample.clone()).await; } - let matching_keys = if sample.key_expr().is_wild() { - self.get_matching_keys(sample.key_expr()).await + let matching_keys = if sample.key_expr.is_wild() { + self.get_matching_keys(&sample.key_expr).await } else { - vec![sample.key_expr().clone().into()] + vec![sample.key_expr.clone().into()] }; log::trace!( "The list of keys matching `{}` is : {:?}", - sample.key_expr(), + sample.key_expr, matching_keys ); for k in matching_keys { if !self - .is_deleted(&k.clone(), sample.timestamp().unwrap()) + .is_deleted(&k.clone(), &sample.timestamp) .await && (self.capability.history.eq(&History::All) || (self.capability.history.eq(&History::Latest) - && self.is_latest(&k, sample.timestamp().unwrap()).await)) + && self.is_latest(&k, &sample.timestamp).await)) { log::trace!( "Sample `{:?}` identified as neded processing for key {}", @@ -386,14 +426,13 @@ impl StorageService { // there might be the case that the actual update was outdated due to a wild card update, but not stored yet in the storage. 
// get the relevant wild card entry and use that value and timestamp to update the storage let sample_to_store = - match self.ovderriding_wild_update(&k, &sample_timestamp).await { + match self.ovderriding_wild_update(&k, &sample.timestamp).await { Some(overriding_update) => overriding_update.into_sample(k.clone()), - None => sample.clone(), + None => sample.into(), }; - let timestamp = sample_to_store.timestamp().unwrap_or(&sample_timestamp); - let stripped_key = match self.strip_prefix(sample_to_store.key_expr()) { + let stripped_key = match self.strip_prefix(&sample_to_store.key_expr) { Ok(stripped) => stripped, Err(e) => { log::error!("{}", e); @@ -401,22 +440,21 @@ impl StorageService { } }; let mut storage = self.storage.lock().await; - let result = match sample.kind() { - SampleKind::Put => { + let result = match sample_to_store.kind { + StorageSampleKind::Put(data) => { storage .put( stripped_key, - Value::new(sample_to_store.payload().clone()) - .with_encoding(sample_to_store.encoding().clone()), - *sample_to_store.timestamp().unwrap(), + data, + sample_to_store.timestamp, ) .await - } - SampleKind::Delete => { + }, + StorageSampleKind::Delete => { // register a tombstone - self.mark_tombstone(&k, *timestamp).await; - storage.delete(stripped_key, *timestamp).await - } + self.mark_tombstone(&k, sample_to_store.timestamp).await; + storage.delete(stripped_key, sample_to_store.timestamp).await + }, }; drop(storage); if self.replication.is_some() @@ -428,7 +466,7 @@ impl StorageService { .as_ref() .unwrap() .log_propagation - .send((k.clone(), *sample_to_store.timestamp().unwrap())); + .send((k.clone(), sample_to_store.timestamp)); match sending { Ok(_) => (), Err(e) => { @@ -459,9 +497,9 @@ impl StorageService { } } - async fn register_wildcard_update(&self, sample: Sample) { + async fn register_wildcard_update(&self, sample: StorageSample) { // @TODO: change into a better store that does incremental writes - let key = sample.key_expr().clone(); + let key = sample.key_expr.clone(); let mut wildcards = self.wildcard_updates.write().await; wildcards.insert(&key, sample.into()); if self.capability.persistence.eq(&Persistence::Durable) { @@ -719,7 +757,7 @@ impl StorageService { while let Ok(reply) = replies.recv_async().await { match reply.sample { Ok(sample) => { - self.process_sample(sample).await; + self.process_sample(sample.into()).await; } Err(e) => log::warn!( "Storage '{}' received an error to align query: {:?}", From 780c82a3cae1115e624141c929639648e6902e16 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 19 Mar 2024 17:35:38 +0100 Subject: [PATCH 028/357] some compile error fixes --- .../src/replica/aligner.rs | 18 ++++++++++-------- .../src/replica/storage.rs | 10 +++++----- 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 0df648409d..3f672382f1 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -12,6 +12,9 @@ // ZettaScale Zenoh Team, // +use crate::replica::storage::StorageSampleKind; + +use super::storage::StorageSample; use super::{Digest, EraType, LogEntry, Snapshotter}; use super::{CONTENTS, ERA, INTERVALS, SUBINTERVALS}; use async_std::sync::{Arc, RwLock}; @@ -29,7 +32,7 @@ pub struct Aligner { digest_key: OwnedKeyExpr, snapshotter: Arc, rx_digest: Receiver<(String, Digest)>, - tx_sample: Sender, + tx_sample: Sender, 
digests_processed: RwLock>, } @@ -38,7 +41,7 @@ impl Aligner { session: Arc, digest_key: OwnedKeyExpr, rx_digest: Receiver<(String, Digest)>, - tx_sample: Sender, + tx_sample: Sender, snapshotter: Arc, ) { let aligner = Aligner { @@ -105,12 +108,11 @@ impl Aligner { log::trace!("[ALIGNER] Received queried samples: {missing_data:?}"); for (key, (ts, value)) in missing_data { - let Value { - payload, encoding, .. - } = value; - let sample = Sample::put(key, payload) - .with_encoding(encoding) - .with_timestamp(ts); + let sample = StorageSample { + key_expr: key.into(), + timestamp: ts, + kind: StorageSampleKind::Put(value), + }; log::debug!("[ALIGNER] Adding {:?} to storage", sample); self.tx_sample.send_async(sample).await.unwrap_or_else(|e| { log::error!("[ALIGNER] Error adding sample to storage: {}", e) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 41a456e344..307ca95680 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -39,13 +39,13 @@ pub const WILDCARD_UPDATES_FILENAME: &str = "wildcard_updates"; pub const TOMBSTONE_FILENAME: &str = "tombstones"; #[derive(Clone, Debug)] -enum StorageSampleKind { +pub enum StorageSampleKind { Put(Value), Delete, } #[derive(Clone, Debug)] -struct StorageSample { +pub struct StorageSample { pub key_expr: KeyExpr<'static>, pub timestamp: Timestamp, pub kind: StorageSampleKind, @@ -179,7 +179,7 @@ impl IntoStorageSample for Update { pub struct ReplicationService { pub empty_start: bool, - pub aligner_updates: Receiver, + pub aligner_updates: Receiver, pub log_propagation: Sender<(OwnedKeyExpr, Timestamp)>, } @@ -361,7 +361,7 @@ impl StorageService { } }; sample.ensure_timestamp(); - self.process_sample(sample).await; + self.process_sample(sample.into()).await; }, // on query on key_expr query = storage_queryable.recv_async() => { @@ -429,7 +429,7 @@ impl StorageService { match self.ovderriding_wild_update(&k, &sample.timestamp).await { Some(overriding_update) => overriding_update.into_sample(k.clone()), - None => sample.into(), + None => sample.clone().into(), }; let stripped_key = match self.strip_prefix(&sample_to_store.key_expr) { From af0d167f6a1b0bcdc6d09074c4e2960f93034e90 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 19 Mar 2024 17:54:31 +0100 Subject: [PATCH 029/357] removed interceptor proxy --- plugins/zenoh-backend-traits/src/lib.rs | 50 +------------------------ 1 file changed, 1 insertion(+), 49 deletions(-) diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index 91e030d361..16c00f64af 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -135,9 +135,7 @@ use async_trait::async_trait; use const_format::concatcp; -use std::sync::Arc; -use zenoh::prelude::{KeyExpr, OwnedKeyExpr, Sample, Selector}; -use zenoh::queryable::ReplyBuilder; +use zenoh::prelude::OwnedKeyExpr; use zenoh::time::Timestamp; use zenoh::value::Value; pub use zenoh::Result as ZResult; @@ -274,49 +272,3 @@ pub trait Storage: Send + Sync { /// Remember to fetch the entry corresponding to the `None` key async fn get_all_entries(&self) -> ZResult, Timestamp)>>; } - -/// A wrapper around the [`zenoh::queryable::Query`] allowing to call the -/// OutgoingDataInterceptor (if any) before to send the reply -pub struct Query { - q: zenoh::queryable::Query, - interceptor: Option Sample + Send + Sync>>, -} 
- -impl Query { - pub fn new( - q: zenoh::queryable::Query, - interceptor: Option Sample + Send + Sync>>, - ) -> Query { - Query { q, interceptor } - } - - /// The full [`Selector`] of this Query. - #[inline(always)] - pub fn selector(&self) -> Selector<'_> { - self.q.selector() - } - - /// The key selector part of this Query. - #[inline(always)] - pub fn key_expr(&self) -> &KeyExpr<'static> { - self.q.key_expr() - } - - /// This Query's selector parameters. - #[inline(always)] - pub fn parameters(&self) -> &str { - self.q.parameters() - } - - /// Sends a Sample as a reply to this Query - pub fn reply(&self, sample: Sample) -> ReplyBuilder<'_> { - // Call outgoing intercerceptor - let sample = if let Some(ref interceptor) = self.interceptor { - interceptor(sample) - } else { - sample - }; - // Send reply - self.q.reply_sample(sample) - } -} From 067823d3aa514735e60b684b0807fedfcfeb8069 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 20 Mar 2024 18:01:54 +0100 Subject: [PATCH 030/357] sample builders --- .../src/replica/storage.rs | 33 +- zenoh-ext/src/querying_subscriber.rs | 9 +- zenoh/src/sample.rs | 434 ++++++++++++------ zenoh/src/session.rs | 39 +- 4 files changed, 350 insertions(+), 165 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 307ca95680..5aa6b92a99 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -24,10 +24,13 @@ use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::ZBuf; use zenoh::prelude::r#async::*; use zenoh::query::ConsolidationMode; +use zenoh::sample::SampleBuilder; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::{Result as ZResult, Session}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; -use zenoh_backend_traits::{Capability, History, Persistence, Storage, StorageInsertionResult, StoredData}; +use zenoh_backend_traits::{ + Capability, History, Persistence, Storage, StorageInsertionResult, StoredData, +}; use zenoh_keyexpr::key_expr::OwnedKeyExpr; use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; use zenoh_keyexpr::keyexpr_tree::{support::NonWild, support::UnknownWildness, KeBoxTree}; @@ -353,14 +356,20 @@ impl StorageService { select!( // on sample for key_expr sample = storage_sub.recv_async() => { - let mut sample = match sample { + let sample = match sample { Ok(sample) => sample, Err(e) => { log::error!("Error in sample: {}", e); continue; } }; - sample.ensure_timestamp(); + let sample = if sample.timestamp().is_none() { + SampleBuilder::new(sample).with_current_timestamp().res_sync() + + + } else { + sample + }; self.process_sample(sample.into()).await; }, // on query on key_expr @@ -411,9 +420,7 @@ impl StorageService { ); for k in matching_keys { - if !self - .is_deleted(&k.clone(), &sample.timestamp) - .await + if !self.is_deleted(&k.clone(), &sample.timestamp).await && (self.capability.history.eq(&History::All) || (self.capability.history.eq(&History::Latest) && self.is_latest(&k, &sample.timestamp).await)) @@ -443,18 +450,16 @@ impl StorageService { let result = match sample_to_store.kind { StorageSampleKind::Put(data) => { storage - .put( - stripped_key, - data, - sample_to_store.timestamp, - ) + .put(stripped_key, data, sample_to_store.timestamp) .await - }, + } StorageSampleKind::Delete => { // register a tombstone self.mark_tombstone(&k, sample_to_store.timestamp).await; - 
storage.delete(stripped_key, sample_to_store.timestamp).await - }, + storage + .delete(stripped_key, sample_to_store.timestamp) + .await + } }; drop(storage); if self.replication.is_some() diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 8cb5480e58..7ca2730f57 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -20,6 +20,7 @@ use std::time::Duration; use zenoh::handlers::{locked, DefaultHandler}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; +use zenoh::sample::SampleBuilder; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::Timestamp; use zenoh::Result as ZResult; @@ -655,7 +656,7 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { let sub_callback = { let state = state.clone(); let callback = callback.clone(); - move |mut s| { + move |s| { let state = &mut zlock!(state); if state.pending_fetches == 0 { callback(s); @@ -663,7 +664,11 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { log::trace!("Sample received while fetch in progress: push it to merge_queue"); // ensure the sample has a timestamp, thus it will always be sorted into the MergeQueue // after any timestamped Sample possibly coming from a fetch reply. - s.ensure_timestamp(); + let s = if s.timestamp().is_none() { + SampleBuilder::new(s).with_current_timestamp().res_sync() + } else { + s + }; state.merge_queue.push(s); } } diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 757d65afd8..395191a0d6 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -20,10 +20,8 @@ use crate::time::{new_reception_timestamp, Timestamp}; use crate::Priority; #[zenoh_macros::unstable] use serde::Serialize; -use std::{ - convert::{TryFrom, TryInto}, - fmt, -}; +use std::{convert::TryFrom, fmt}; +use zenoh_core::{zresult, AsyncResolve, Resolvable, SyncResolve}; use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::{core::CongestionControl, network::push::ext::QoSType}; @@ -57,6 +55,83 @@ pub(crate) struct DataInfo { pub qos: QoS, } +pub(crate) trait DataInfoIntoSample { + fn into_sample( + self, + key_expr: IntoKeyExpr, + payload: IntoPayload, + #[cfg(feature = "unstable")] attachment: Option, + ) -> Sample + where + IntoKeyExpr: Into>, + IntoPayload: Into; +} + +impl DataInfoIntoSample for DataInfo { + // TODO: this is internal function. + // Technically it may create invalid sample (e.g. a delete sample with a payload and encoding) + // The test for it is intentionally not added to avoid inserting extra "if" into hot path. + // This need to be additionally investigated and measured. 
+ #[inline] + fn into_sample( + self, + key_expr: IntoKeyExpr, + payload: IntoPayload, + #[cfg(feature = "unstable")] attachment: Option, + ) -> Sample + where + IntoKeyExpr: Into>, + IntoPayload: Into, + { + Sample { + key_expr: key_expr.into(), + payload: payload.into(), + kind: self.kind, + encoding: self.encoding.unwrap_or_default(), + timestamp: self.timestamp, + qos: self.qos, + #[cfg(feature = "unstable")] + source_info: SourceInfo { + source_id: self.source_id, + source_sn: self.source_sn, + }, + #[cfg(feature = "unstable")] + attachment, + } + } +} + +impl DataInfoIntoSample for Option { + #[inline] + fn into_sample( + self, + key_expr: IntoKeyExpr, + payload: IntoPayload, + #[cfg(feature = "unstable")] attachment: Option, + ) -> Sample + where + IntoKeyExpr: Into>, + IntoPayload: Into, + { + if let Some(data_info) = self { + data_info.into_sample(key_expr, payload, attachment) + } else { + Sample { + key_expr: key_expr.into(), + payload: payload.into(), + kind: SampleKind::Put, + encoding: Encoding::default(), + timestamp: None, + qos: QoS::default(), + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment, + } + } + } +} + /// Informations on the source of a zenoh [`Sample`]. #[zenoh_macros::unstable] #[derive(Debug, Clone)] @@ -359,125 +434,275 @@ impl TryFrom for SampleKind { #[zenoh_macros::unstable] pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; -/// A zenoh sample. -#[non_exhaustive] -#[derive(Clone, Debug)] -pub struct Sample { - pub(crate) key_expr: KeyExpr<'static>, - pub(crate) payload: Payload, - pub(crate) kind: SampleKind, - pub(crate) encoding: Encoding, - pub(crate) timestamp: Option, - pub(crate) qos: QoS, +pub struct SampleBuilder(Sample); - #[cfg(feature = "unstable")] - pub(crate) source_info: SourceInfo, +impl SampleBuilder { + pub fn new(sample: Sample) -> Self { + Self(sample) + } - #[cfg(feature = "unstable")] - pub(crate) attachment: Option, + pub fn with_keyexpr(mut self, key_expr: IntoKeyExpr) -> Self + where + IntoKeyExpr: Into>, + { + self.0.key_expr = key_expr.into(); + self + } + + // pub(crate) fn with_kind(mut self, kind: SampleKind) -> Self { + // self.0.kind = kind; + // self + // } + + pub(crate) fn with_encoding(mut self, encoding: Encoding) -> Self { + self.0.encoding = encoding; + self + } + + pub(crate) fn with_payload(mut self, payload: IntoPayload) -> Self + where + IntoPayload: Into, + { + self.0.payload = payload.into(); + self + } + + pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self { + self.0.timestamp = Some(timestamp); + self + } + + pub fn with_current_timestamp(mut self) -> Self { + self.0.timestamp = Some(new_reception_timestamp()); + self + } + + pub fn with_qos(mut self, qos: QoS) -> Self { + self.0.qos = qos; + self + } + + pub fn with_source_info(mut self, source_info: SourceInfo) -> Self { + self.0.source_info = source_info; + self + } + + pub fn with_attachment(mut self, attachment: Attachment) -> Self { + self.0.attachment = Some(attachment); + self + } } -impl Sample { - /// Creates a "put" Sample. 
- #[inline] - pub fn put(key_expr: IntoKeyExpr, payload: IntoPayload) -> Self +pub struct PutSampleBuilder(SampleBuilder); + +impl PutSampleBuilder { + pub fn new(key_expr: IntoKeyExpr, payload: IntoPayload) -> Self where IntoKeyExpr: Into>, IntoPayload: Into, { - Sample { + Self(SampleBuilder::new(Sample { key_expr: key_expr.into(), payload: payload.into(), - encoding: Encoding::default(), kind: SampleKind::Put, + encoding: Encoding::default(), timestamp: None, qos: QoS::default(), #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), #[cfg(feature = "unstable")] attachment: None, - } + })) } - /// Creates a "delete" Sample. - #[inline] - pub fn delete(key_expr: IntoKeyExpr) -> Self + pub fn with_payload(mut self, payload: IntoPayload) -> Self + where + IntoPayload: Into, + { + self.0 = self.0.with_payload(payload); + self + } + + pub fn with_encoding(mut self, encoding: Encoding) -> Self { + self.0 = self.0.with_encoding(encoding); + self + } + + pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self { + self.0 = self.0.with_timestamp(timestamp); + self + } + + pub fn with_current_timestamp(mut self) -> Self { + self.0 = self.0.with_current_timestamp(); + self + } + + pub fn with_qos(mut self, qos: QoS) -> Self { + self.0 = self.0.with_qos(qos); + self + } + + #[zenoh_macros::unstable] + pub fn with_source_info(mut self, source_info: SourceInfo) -> Self { + self.0 = self.0.with_source_info(source_info); + self + } + + #[zenoh_macros::unstable] + pub fn with_attachment(mut self, attachment: Attachment) -> Self { + self.0 = self.0.with_attachment(attachment); + self + } +} + +pub struct DeleteSampleBuilder(SampleBuilder); + +impl DeleteSampleBuilder { + pub fn new(key_expr: IntoKeyExpr) -> Self where IntoKeyExpr: Into>, { - Sample { + Self(SampleBuilder::new(Sample { key_expr: key_expr.into(), payload: Payload::empty(), - encoding: Encoding::default(), kind: SampleKind::Delete, + encoding: Encoding::default(), timestamp: None, qos: QoS::default(), #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), #[cfg(feature = "unstable")] attachment: None, - } + })) } - - /// Attempts to create a "put" Sample - #[inline] - pub fn try_put( - key_expr: TryIntoKeyExpr, - payload: TryIntoPayload, - ) -> Result + pub fn with_keyexpr(mut self, key_expr: IntoKeyExpr) -> Self where - TryIntoKeyExpr: TryInto>, - >>::Error: Into, - TryIntoPayload: TryInto, - >::Error: Into, + IntoKeyExpr: Into>, { - let key_expr: KeyExpr<'static> = key_expr.try_into().map_err(Into::into)?; - let payload: Payload = payload.try_into().map_err(Into::into)?; - Ok(Self::put(key_expr, payload)) + self.0 = self.0.with_keyexpr(key_expr); + self + } + pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self { + self.0 = self.0.with_timestamp(timestamp); + self + } + pub fn with_current_timestamp(mut self) -> Self { + self.0 = self.0.with_current_timestamp(); + self } + pub fn with_qos(mut self, qos: QoS) -> Self { + self.0 = self.0.with_qos(qos); + self + } + #[zenoh_macros::unstable] + pub fn with_source_info(mut self, source_info: SourceInfo) -> Self { + self.0 = self.0.with_source_info(source_info); + self + } + #[zenoh_macros::unstable] + pub fn with_attachment(mut self, attachment: Attachment) -> Self { + self.0 = self.0.with_attachment(attachment); + self + } +} - /// Attempts to create a "delete" Sample - #[inline] - pub fn try_delete( - key_expr: TryIntoKeyExpr, - ) -> Result - where - TryIntoKeyExpr: TryInto>, - >>::Error: Into, - { - let key_expr: KeyExpr<'static> = 
key_expr.try_into().map_err(Into::into)?; - Ok(Self::delete(key_expr)) +impl From for SampleBuilder { + fn from(sample: Sample) -> Self { + SampleBuilder(sample) } +} - /// Creates a new Sample with optional data info. - #[inline] - pub(crate) fn with_info(mut self, mut data_info: Option) -> Self { - if let Some(mut data_info) = data_info.take() { - self.kind = data_info.kind; - if let Some(encoding) = data_info.encoding.take() { - self.encoding = encoding; - } - self.qos = data_info.qos; - self.timestamp = data_info.timestamp; - #[cfg(feature = "unstable")] - { - self.source_info = SourceInfo { - source_id: data_info.source_id, - source_sn: data_info.source_sn, - }; - } +impl TryFrom for PutSampleBuilder { + type Error = zresult::Error; + fn try_from(sample: Sample) -> Result { + if sample.kind != SampleKind::Put { + bail!("Sample is not a put sample") } - self + Ok(Self(SampleBuilder(sample))) } +} - /// Sets the encoding of this Sample - #[inline] - pub fn with_encoding(mut self, encoding: Encoding) -> Self { - assert!(self.kind == SampleKind::Put, "Cannot set encoding on a delete sample"); - self.encoding = encoding; - self +impl TryFrom for DeleteSampleBuilder { + type Error = zresult::Error; + fn try_from(sample: Sample) -> Result { + if sample.kind != SampleKind::Delete { + bail!("Sample is not a delete sample") + } + Ok(Self(SampleBuilder(sample))) + } +} + +impl Resolvable for SampleBuilder { + type To = Sample; +} + +impl Resolvable for PutSampleBuilder { + type To = Sample; +} + +impl Resolvable for DeleteSampleBuilder { + type To = Sample; +} + +impl SyncResolve for SampleBuilder { + fn res_sync(self) -> Self::To { + self.0 } +} + +impl SyncResolve for PutSampleBuilder { + fn res_sync(self) -> Self::To { + self.0.res_sync() + } +} + +impl SyncResolve for DeleteSampleBuilder { + fn res_sync(self) -> Self::To { + self.0.res_sync() + } +} +impl AsyncResolve for SampleBuilder { + type Future = futures::future::Ready; + fn res_async(self) -> Self::Future { + futures::future::ready(self.0) + } +} + +impl AsyncResolve for PutSampleBuilder { + type Future = futures::future::Ready; + fn res_async(self) -> Self::Future { + self.0.res_async() + } +} + +impl AsyncResolve for DeleteSampleBuilder { + type Future = futures::future::Ready; + fn res_async(self) -> Self::Future { + self.0.res_async() + } +} + +/// A zenoh sample. +#[non_exhaustive] +#[derive(Clone, Debug)] +pub struct Sample { + pub(crate) key_expr: KeyExpr<'static>, + pub(crate) payload: Payload, + pub(crate) kind: SampleKind, + pub(crate) encoding: Encoding, + pub(crate) timestamp: Option, + pub(crate) qos: QoS, + + #[cfg(feature = "unstable")] + pub(crate) source_info: SourceInfo, + + #[cfg(feature = "unstable")] + pub(crate) attachment: Option, +} + +impl Sample { /// Gets the key expression on which this Sample was published. #[inline] pub fn key_expr(&self) -> &KeyExpr<'static> { @@ -508,15 +733,6 @@ impl Sample { self.timestamp.as_ref() } - /// Sets the timestamp of this Sample. - #[inline] - #[doc(hidden)] - #[zenoh_macros::unstable] - pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self { - self.timestamp = Some(timestamp); - self - } - /// Gets the quality of service settings this Sample was sent with. #[inline] pub fn qos(&self) -> &QoS { @@ -530,52 +746,12 @@ impl Sample { &self.source_info } - /// Sets the source info of this Sample. 
- #[zenoh_macros::unstable] - #[inline] - pub fn with_source_info(mut self, source_info: SourceInfo) -> Self { - self.source_info = source_info; - self - } - - /// Ensure that an associated Timestamp is present in this Sample. - /// If not, a new one is created with the current system time and 0x00 as id. - /// Get the timestamp of this sample (either existing one or newly created) - #[inline] - #[doc(hidden)] - #[zenoh_macros::unstable] - pub fn ensure_timestamp(&mut self) -> &Timestamp { - if let Some(ref timestamp) = self.timestamp { - timestamp - } else { - let timestamp = new_reception_timestamp(); - self.timestamp = Some(timestamp); - self.timestamp.as_ref().unwrap() - } - } - /// Gets the sample attachment: a map of key-value pairs, where each key and value are byte-slices. #[zenoh_macros::unstable] #[inline] pub fn attachment(&self) -> Option<&Attachment> { self.attachment.as_ref() } - - /// Gets the mutable sample attachment: a map of key-value pairs, where each key and value are byte-slices. - #[inline] - #[doc(hidden)] - #[zenoh_macros::unstable] - pub fn attachment_mut(&mut self) -> &mut Option { - &mut self.attachment - } - - #[inline] - #[doc(hidden)] - #[zenoh_macros::unstable] - pub fn with_attachment(mut self, attachment: Attachment) -> Self { - self.attachment = Some(attachment); - self - } } impl From for Value { diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 93d1e2fb9d..0a63d82354 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -32,6 +32,7 @@ use crate::queryable::*; #[cfg(feature = "unstable")] use crate::sample::Attachment; use crate::sample::DataInfo; +use crate::sample::DataInfoIntoSample; use crate::sample::QoS; use crate::selector::TIME_RANGE_KEY; use crate::subscriber::*; @@ -1537,21 +1538,21 @@ impl Session { drop(state); let zenoh_collections::single_or_vec::IntoIter { drain, last } = callbacks.into_iter(); for (cb, key_expr) in drain { - #[allow(unused_mut)] - let mut sample = Sample::put(key_expr, payload.clone()).with_info(info.clone()); - #[cfg(feature = "unstable")] - { - sample.attachment = attachment.clone(); - } + let sample = info.clone().into_sample( + key_expr, + payload.clone(), + #[cfg(feature = "unstable")] + attachment.clone(), + ); cb(sample); } if let Some((cb, key_expr)) = last { - #[allow(unused_mut)] - let mut sample = Sample::put(key_expr, payload).with_info(info); - #[cfg(feature = "unstable")] - { - sample.attachment = attachment; - } + let sample = info.into_sample( + key_expr, + payload, + #[cfg(feature = "unstable")] + attachment.clone(), + ); cb(sample); } } @@ -2254,14 +2255,12 @@ impl Primitives for Session { attachment: _attachment.map(Into::into), }, }; - - #[allow(unused_mut)] - let mut sample = - Sample::put(key_expr.into_owned(), payload).with_info(Some(info)); - #[cfg(feature = "unstable")] - { - sample.attachment = attachment; - } + let sample = info.into_sample( + key_expr.into_owned(), + payload, + #[cfg(feature = "unstable")] + attachment, + ); let new_reply = Reply { sample: Ok(sample), replier_id: ZenohId::rand(), // TODO From 4f1ba2f11fabc36a9c6900fee77107fd256fc14f Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 20 Mar 2024 19:32:38 +0100 Subject: [PATCH 031/357] compiles --- .../src/replica/storage.rs | 37 ++++++++++++------- zenoh/src/sample.rs | 29 +++++++++++++++ 2 files changed, 52 insertions(+), 14 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 5aa6b92a99..f90ea01754 
100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -28,9 +28,8 @@ use zenoh::sample::SampleBuilder; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::{Result as ZResult, Session}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; -use zenoh_backend_traits::{ - Capability, History, Persistence, Storage, StorageInsertionResult, StoredData, -}; +use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; +use zenoh_core::SyncResolve; use zenoh_keyexpr::key_expr::OwnedKeyExpr; use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; use zenoh_keyexpr::keyexpr_tree::{support::NonWild, support::UnknownWildness, KeBoxTree}; @@ -274,7 +273,12 @@ impl StorageService { t.add_async(gc).await; // subscribe on key_expr - let storage_sub = match self.session.declare_subscriber(&self.key_expr).res().await { + let storage_sub = match self + .session + .declare_subscriber(&self.key_expr) + .res_async() + .await + { Ok(storage_sub) => storage_sub, Err(e) => { log::error!("Error starting storage '{}': {}", self.name, e); @@ -287,7 +291,7 @@ impl StorageService { .session .declare_queryable(&self.key_expr) .complete(self.complete) - .res() + .res_async() .await { Ok(storage_queryable) => storage_queryable, @@ -365,8 +369,6 @@ impl StorageService { }; let sample = if sample.timestamp().is_none() { SampleBuilder::new(sample).with_current_timestamp().res_sync() - - } else { sample }; @@ -622,8 +624,12 @@ impl StorageService { match storage.get(stripped_key, q.parameters()).await { Ok(stored_data) => { for entry in stored_data { - let sample = entry.into_sample(key.clone()); - if let Err(e) = q.reply_sample(sample).res().await { + if let Err(e) = q + .reply(key.clone(), entry.value.payload) + .with_timestamp(entry.timestamp) + .res_async() + .await + { log::warn!( "Storage '{}' raised an error replying a query: {}", self.name, @@ -652,10 +658,13 @@ impl StorageService { let Value { payload, encoding, .. 
} = entry.value; - let sample = Sample::put(q.key_expr().clone(), payload) + if let Err(e) = q + .reply(q.key_expr().clone(), payload) .with_encoding(encoding) - .with_timestamp(entry.timestamp); - if let Err(e) = q.reply_sample(sample).res().await { + .with_timestamp(entry.timestamp) + .res_async() + .await + { log::warn!( "Storage '{}' raised an error replying a query: {}", self.name, @@ -668,7 +677,7 @@ impl StorageService { let err_message = format!("Storage '{}' raised an error on query: {}", self.name, e); log::warn!("{}", err_message); - if let Err(e) = q.reply_err(err_message).res().await { + if let Err(e) = q.reply_err(err_message).res_async().await { log::warn!( "Storage '{}' raised an error replying a query: {}", self.name, @@ -750,7 +759,7 @@ impl StorageService { .get(KeyExpr::from(&self.key_expr).with_parameters("_time=[..]")) .target(QueryTarget::All) .consolidation(ConsolidationMode::None) - .res() + .res_async() .await { Ok(replies) => replies, diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 395191a0d6..29d46cca3e 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -684,6 +684,19 @@ impl AsyncResolve for DeleteSampleBuilder { } } +pub struct SampleDecomposed { + pub key_expr: KeyExpr<'static>, + pub payload: Payload, + pub kind: SampleKind, + pub encoding: Encoding, + pub timestamp: Option, + pub qos: QoS, + #[cfg(feature = "unstable")] + pub source_info: SourceInfo, + #[cfg(feature = "unstable")] + pub attachment: Option, +} + /// A zenoh sample. #[non_exhaustive] #[derive(Clone, Debug)] @@ -752,6 +765,22 @@ impl Sample { pub fn attachment(&self) -> Option<&Attachment> { self.attachment.as_ref() } + + /// Decomposes the Sample into its components + pub fn decompose(self) -> SampleDecomposed { + SampleDecomposed { + key_expr: self.key_expr, + payload: self.payload, + kind: self.kind, + encoding: self.encoding, + timestamp: self.timestamp, + qos: self.qos, + #[cfg(feature = "unstable")] + source_info: self.source_info, + #[cfg(feature = "unstable")] + attachment: self.attachment, + } + } } impl From for Value { From d7cb97a3705b82364a2b48557d025aa3bff156da Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sat, 23 Mar 2024 00:41:17 +0100 Subject: [PATCH 032/357] SampleBuilderTrait --- .../src/replica/storage.rs | 4 +- zenoh-ext/src/querying_subscriber.rs | 8 +- zenoh/src/lib.rs | 1 + zenoh/src/sample.rs | 282 +--------------- zenoh/src/sample_builder.rs | 306 ++++++++++++++++++ 5 files changed, 315 insertions(+), 286 deletions(-) create mode 100644 zenoh/src/sample_builder.rs diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index f90ea01754..576f6adec2 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -24,7 +24,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::ZBuf; use zenoh::prelude::r#async::*; use zenoh::query::ConsolidationMode; -use zenoh::sample::SampleBuilder; +use zenoh::sample_builder::{SampleBuilder, SampleBuilderTrait}; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::{Result as ZResult, Session}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; @@ -368,7 +368,7 @@ impl StorageService { } }; let sample = if sample.timestamp().is_none() { - SampleBuilder::new(sample).with_current_timestamp().res_sync() + SampleBuilder::from(sample).with_timestamp(new_reception_timestamp()).res_sync() } else { sample 
}; diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 7ca2730f57..eb6d6e9516 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -20,9 +20,9 @@ use std::time::Duration; use zenoh::handlers::{locked, DefaultHandler}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; -use zenoh::sample::SampleBuilder; +use zenoh::sample_builder::{SampleBuilder, SampleBuilderTrait}; use zenoh::subscriber::{Reliability, Subscriber}; -use zenoh::time::Timestamp; +use zenoh::time::{new_reception_timestamp, Timestamp}; use zenoh::Result as ZResult; use zenoh::SessionRef; use zenoh_core::{zlock, AsyncResolve, Resolvable, SyncResolve}; @@ -665,7 +665,9 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { // ensure the sample has a timestamp, thus it will always be sorted into the MergeQueue // after any timestamped Sample possibly coming from a fetch reply. let s = if s.timestamp().is_none() { - SampleBuilder::new(s).with_current_timestamp().res_sync() + SampleBuilder::from(s) + .with_timestamp(new_reception_timestamp()) + .res_sync() } else { s }; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index eb1ba1bcd1..8618cb9a88 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -146,6 +146,7 @@ pub mod publication; pub mod query; pub mod queryable; pub mod sample; +pub mod sample_builder; pub mod subscriber; pub mod value; #[cfg(feature = "shared-memory")] diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 29d46cca3e..2c98d5ead1 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -16,12 +16,11 @@ use crate::encoding::Encoding; use crate::payload::Payload; use crate::prelude::{KeyExpr, Value}; -use crate::time::{new_reception_timestamp, Timestamp}; +use crate::time::Timestamp; use crate::Priority; #[zenoh_macros::unstable] use serde::Serialize; use std::{convert::TryFrom, fmt}; -use zenoh_core::{zresult, AsyncResolve, Resolvable, SyncResolve}; use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::{core::CongestionControl, network::push::ext::QoSType}; @@ -434,269 +433,6 @@ impl TryFrom for SampleKind { #[zenoh_macros::unstable] pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; -pub struct SampleBuilder(Sample); - -impl SampleBuilder { - pub fn new(sample: Sample) -> Self { - Self(sample) - } - - pub fn with_keyexpr(mut self, key_expr: IntoKeyExpr) -> Self - where - IntoKeyExpr: Into>, - { - self.0.key_expr = key_expr.into(); - self - } - - // pub(crate) fn with_kind(mut self, kind: SampleKind) -> Self { - // self.0.kind = kind; - // self - // } - - pub(crate) fn with_encoding(mut self, encoding: Encoding) -> Self { - self.0.encoding = encoding; - self - } - - pub(crate) fn with_payload(mut self, payload: IntoPayload) -> Self - where - IntoPayload: Into, - { - self.0.payload = payload.into(); - self - } - - pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self { - self.0.timestamp = Some(timestamp); - self - } - - pub fn with_current_timestamp(mut self) -> Self { - self.0.timestamp = Some(new_reception_timestamp()); - self - } - - pub fn with_qos(mut self, qos: QoS) -> Self { - self.0.qos = qos; - self - } - - pub fn with_source_info(mut self, source_info: SourceInfo) -> Self { - self.0.source_info = source_info; - self - } - - pub fn with_attachment(mut self, attachment: Attachment) -> Self { - self.0.attachment = Some(attachment); - self - } -} - -pub struct PutSampleBuilder(SampleBuilder); - -impl PutSampleBuilder { 
- pub fn new(key_expr: IntoKeyExpr, payload: IntoPayload) -> Self - where - IntoKeyExpr: Into>, - IntoPayload: Into, - { - Self(SampleBuilder::new(Sample { - key_expr: key_expr.into(), - payload: payload.into(), - kind: SampleKind::Put, - encoding: Encoding::default(), - timestamp: None, - qos: QoS::default(), - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment: None, - })) - } - - pub fn with_payload(mut self, payload: IntoPayload) -> Self - where - IntoPayload: Into, - { - self.0 = self.0.with_payload(payload); - self - } - - pub fn with_encoding(mut self, encoding: Encoding) -> Self { - self.0 = self.0.with_encoding(encoding); - self - } - - pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self { - self.0 = self.0.with_timestamp(timestamp); - self - } - - pub fn with_current_timestamp(mut self) -> Self { - self.0 = self.0.with_current_timestamp(); - self - } - - pub fn with_qos(mut self, qos: QoS) -> Self { - self.0 = self.0.with_qos(qos); - self - } - - #[zenoh_macros::unstable] - pub fn with_source_info(mut self, source_info: SourceInfo) -> Self { - self.0 = self.0.with_source_info(source_info); - self - } - - #[zenoh_macros::unstable] - pub fn with_attachment(mut self, attachment: Attachment) -> Self { - self.0 = self.0.with_attachment(attachment); - self - } -} - -pub struct DeleteSampleBuilder(SampleBuilder); - -impl DeleteSampleBuilder { - pub fn new(key_expr: IntoKeyExpr) -> Self - where - IntoKeyExpr: Into>, - { - Self(SampleBuilder::new(Sample { - key_expr: key_expr.into(), - payload: Payload::empty(), - kind: SampleKind::Delete, - encoding: Encoding::default(), - timestamp: None, - qos: QoS::default(), - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment: None, - })) - } - pub fn with_keyexpr(mut self, key_expr: IntoKeyExpr) -> Self - where - IntoKeyExpr: Into>, - { - self.0 = self.0.with_keyexpr(key_expr); - self - } - pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self { - self.0 = self.0.with_timestamp(timestamp); - self - } - pub fn with_current_timestamp(mut self) -> Self { - self.0 = self.0.with_current_timestamp(); - self - } - pub fn with_qos(mut self, qos: QoS) -> Self { - self.0 = self.0.with_qos(qos); - self - } - #[zenoh_macros::unstable] - pub fn with_source_info(mut self, source_info: SourceInfo) -> Self { - self.0 = self.0.with_source_info(source_info); - self - } - #[zenoh_macros::unstable] - pub fn with_attachment(mut self, attachment: Attachment) -> Self { - self.0 = self.0.with_attachment(attachment); - self - } -} - -impl From for SampleBuilder { - fn from(sample: Sample) -> Self { - SampleBuilder(sample) - } -} - -impl TryFrom for PutSampleBuilder { - type Error = zresult::Error; - fn try_from(sample: Sample) -> Result { - if sample.kind != SampleKind::Put { - bail!("Sample is not a put sample") - } - Ok(Self(SampleBuilder(sample))) - } -} - -impl TryFrom for DeleteSampleBuilder { - type Error = zresult::Error; - fn try_from(sample: Sample) -> Result { - if sample.kind != SampleKind::Delete { - bail!("Sample is not a delete sample") - } - Ok(Self(SampleBuilder(sample))) - } -} - -impl Resolvable for SampleBuilder { - type To = Sample; -} - -impl Resolvable for PutSampleBuilder { - type To = Sample; -} - -impl Resolvable for DeleteSampleBuilder { - type To = Sample; -} - -impl SyncResolve for SampleBuilder { - fn res_sync(self) -> Self::To { - self.0 - } -} - -impl SyncResolve for PutSampleBuilder { - fn res_sync(self) -> 
Self::To { - self.0.res_sync() - } -} - -impl SyncResolve for DeleteSampleBuilder { - fn res_sync(self) -> Self::To { - self.0.res_sync() - } -} - -impl AsyncResolve for SampleBuilder { - type Future = futures::future::Ready; - fn res_async(self) -> Self::Future { - futures::future::ready(self.0) - } -} - -impl AsyncResolve for PutSampleBuilder { - type Future = futures::future::Ready; - fn res_async(self) -> Self::Future { - self.0.res_async() - } -} - -impl AsyncResolve for DeleteSampleBuilder { - type Future = futures::future::Ready; - fn res_async(self) -> Self::Future { - self.0.res_async() - } -} - -pub struct SampleDecomposed { - pub key_expr: KeyExpr<'static>, - pub payload: Payload, - pub kind: SampleKind, - pub encoding: Encoding, - pub timestamp: Option, - pub qos: QoS, - #[cfg(feature = "unstable")] - pub source_info: SourceInfo, - #[cfg(feature = "unstable")] - pub attachment: Option, -} - /// A zenoh sample. #[non_exhaustive] #[derive(Clone, Debug)] @@ -765,22 +501,6 @@ impl Sample { pub fn attachment(&self) -> Option<&Attachment> { self.attachment.as_ref() } - - /// Decomposes the Sample into its components - pub fn decompose(self) -> SampleDecomposed { - SampleDecomposed { - key_expr: self.key_expr, - payload: self.payload, - kind: self.kind, - encoding: self.encoding, - timestamp: self.timestamp, - qos: self.qos, - #[cfg(feature = "unstable")] - source_info: self.source_info, - #[cfg(feature = "unstable")] - attachment: self.attachment, - } - } } impl From for Value { diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs new file mode 100644 index 0000000000..a9cffb22d1 --- /dev/null +++ b/zenoh/src/sample_builder.rs @@ -0,0 +1,306 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use crate::sample::Attachment; +use crate::sample::QoS; +use crate::sample::SourceInfo; +use crate::Encoding; +use crate::KeyExpr; +use crate::Payload; +use crate::Priority; +use crate::Sample; +use crate::SampleKind; +use uhlc::Timestamp; +use zenoh_core::zresult; +use zenoh_core::AsyncResolve; +use zenoh_core::Resolvable; +use zenoh_core::SyncResolve; +use zenoh_protocol::core::CongestionControl; + +pub trait SampleBuilderTrait { + fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self + where + IntoKeyExpr: Into>; + fn with_timestamp(self, timestamp: Timestamp) -> Self; + #[zenoh_macros::unstable] + fn with_source_info(self, source_info: SourceInfo) -> Self; + #[zenoh_macros::unstable] + fn with_attachment(self, attachment: Attachment) -> Self; + fn congestion_control(self, congestion_control: CongestionControl) -> Self; + fn priority(self, priority: Priority) -> Self; + fn express(self, is_express: bool) -> Self; +} + +pub trait PutSampleBuilderTrait: SampleBuilderTrait { + fn with_encoding(self, encoding: Encoding) -> Self; + fn with_payload(self, payload: IntoPayload) -> Self + where + IntoPayload: Into; +} + +pub trait DeleteSampleBuilderTrait: SampleBuilderTrait {} + +pub struct SampleBuilder(Sample); + +impl SampleBuilderTrait for SampleBuilder { + fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self + where + IntoKeyExpr: Into>, + { + let mut this = self; + this.0.key_expr = key_expr.into(); + this + } + + fn with_timestamp(self, timestamp: Timestamp) -> Self { + let mut this = self; + this.0.timestamp = Some(timestamp); + this + } + #[zenoh_macros::unstable] + fn with_source_info(self, source_info: SourceInfo) -> Self { + let mut this = self; + this.0.source_info = source_info; + this + } + #[zenoh_macros::unstable] + fn with_attachment(self, attachment: Attachment) -> Self { + let mut this = self; + this.0.attachment = Some(attachment); + this + } + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + let mut this = self; + this.0.qos = this.0.qos.with_congestion_control(congestion_control); + this + } + fn priority(self, priority: Priority) -> Self { + let mut this = self; + this.0.qos = this.0.qos.with_priority(priority); + this + } + fn express(self, is_express: bool) -> Self { + let mut this = self; + this.0.qos = this.0.qos.with_express(is_express); + this + } +} + +pub struct PutSampleBuilder(SampleBuilder); + +impl PutSampleBuilder { + pub fn new(key_expr: IntoKeyExpr, payload: IntoPayload) -> Self + where + IntoKeyExpr: Into>, + IntoPayload: Into, + { + Self(SampleBuilder::from(Sample { + key_expr: key_expr.into(), + payload: payload.into(), + kind: SampleKind::Put, + encoding: Encoding::default(), + timestamp: None, + qos: QoS::default(), + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment: None, + })) + } + pub fn without_timestamp(self) -> Self { + let mut this = self; + this.0 .0.timestamp = None; + this + } + pub fn without_attachment(self) -> Self { + let mut this = self; + this.0 .0.attachment = None; + this + } +} + +impl SampleBuilderTrait for PutSampleBuilder { + fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self + where + IntoKeyExpr: Into>, + { + Self(self.0.with_keyexpr(key_expr)) + } + fn with_timestamp(self, timestamp: Timestamp) -> Self { + Self(self.0.with_timestamp(timestamp)) + } + #[zenoh_macros::unstable] + fn with_source_info(self, source_info: SourceInfo) 
-> Self { + Self(self.0.with_source_info(source_info)) + } + #[zenoh_macros::unstable] + fn with_attachment(self, attachment: Attachment) -> Self { + Self(self.0.with_attachment(attachment)) + } + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + Self(self.0.congestion_control(congestion_control)) + } + fn priority(self, priority: Priority) -> Self { + Self(self.0.priority(priority)) + } + fn express(self, is_express: bool) -> Self { + Self(self.0.express(is_express)) + } +} + +impl PutSampleBuilderTrait for PutSampleBuilder { + fn with_encoding(self, encoding: Encoding) -> Self { + let mut this = self; + this.0 .0.encoding = encoding; + this + } + fn with_payload(self, payload: IntoPayload) -> Self + where + IntoPayload: Into, + { + let mut this = self; + this.0 .0.payload = payload.into(); + this + } +} + +pub struct DeleteSampleBuilder(SampleBuilder); + +impl DeleteSampleBuilder { + pub fn new(key_expr: IntoKeyExpr) -> Self + where + IntoKeyExpr: Into>, + { + Self(SampleBuilder::from(Sample { + key_expr: key_expr.into(), + payload: Payload::empty(), + kind: SampleKind::Delete, + encoding: Encoding::default(), + timestamp: None, + qos: QoS::default(), + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment: None, + })) + } +} + +impl SampleBuilderTrait for DeleteSampleBuilder { + fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self + where + IntoKeyExpr: Into>, + { + Self(self.0.with_keyexpr(key_expr)) + } + fn with_timestamp(self, timestamp: Timestamp) -> Self { + Self(self.0.with_timestamp(timestamp)) + } + #[zenoh_macros::unstable] + fn with_source_info(self, source_info: SourceInfo) -> Self { + Self(self.0.with_source_info(source_info)) + } + #[zenoh_macros::unstable] + fn with_attachment(self, attachment: Attachment) -> Self { + Self(self.0.with_attachment(attachment)) + } + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + Self(self.0.congestion_control(congestion_control)) + } + fn priority(self, priority: Priority) -> Self { + Self(self.0.priority(priority)) + } + fn express(self, is_express: bool) -> Self { + Self(self.0.express(is_express)) + } +} + +impl DeleteSampleBuilderTrait for DeleteSampleBuilder {} + +impl From for SampleBuilder { + fn from(sample: Sample) -> Self { + SampleBuilder(sample) + } +} + +impl TryFrom for PutSampleBuilder { + type Error = zresult::Error; + fn try_from(sample: Sample) -> Result { + if sample.kind != SampleKind::Put { + bail!("Sample is not a put sample") + } + Ok(Self(SampleBuilder(sample))) + } +} + +impl TryFrom for DeleteSampleBuilder { + type Error = zresult::Error; + fn try_from(sample: Sample) -> Result { + if sample.kind != SampleKind::Delete { + bail!("Sample is not a delete sample") + } + Ok(Self(SampleBuilder(sample))) + } +} + +impl Resolvable for SampleBuilder { + type To = Sample; +} + +impl Resolvable for PutSampleBuilder { + type To = Sample; +} + +impl Resolvable for DeleteSampleBuilder { + type To = Sample; +} + +impl SyncResolve for SampleBuilder { + fn res_sync(self) -> Self::To { + self.0 + } +} + +impl SyncResolve for PutSampleBuilder { + fn res_sync(self) -> Self::To { + self.0.res_sync() + } +} + +impl SyncResolve for DeleteSampleBuilder { + fn res_sync(self) -> Self::To { + self.0.res_sync() + } +} + +impl AsyncResolve for SampleBuilder { + type Future = futures::future::Ready; + fn res_async(self) -> Self::Future { + futures::future::ready(self.0) + } +} + +impl AsyncResolve for PutSampleBuilder { + type 
Future = futures::future::Ready; + fn res_async(self) -> Self::Future { + self.0.res_async() + } +} + +impl AsyncResolve for DeleteSampleBuilder { + type Future = futures::future::Ready; + fn res_async(self) -> Self::Future { + self.0.res_async() + } +} From a05b93de8c9507e597d2f85bce88c9787241590b Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sat, 23 Mar 2024 15:43:58 +0100 Subject: [PATCH 033/357] reply builder unfinished --- .../src/replica/storage.rs | 4 +- zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh/src/queryable.rs | 268 +++++++++++------- zenoh/src/sample_builder.rs | 99 ++++--- 4 files changed, 238 insertions(+), 135 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 576f6adec2..1aadc88611 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -24,7 +24,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::ZBuf; use zenoh::prelude::r#async::*; use zenoh::query::ConsolidationMode; -use zenoh::sample_builder::{SampleBuilder, SampleBuilderTrait}; +use zenoh::sample_builder::{SampleBuilderTrait, SampleUpdater}; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::{Result as ZResult, Session}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; @@ -368,7 +368,7 @@ impl StorageService { } }; let sample = if sample.timestamp().is_none() { - SampleBuilder::from(sample).with_timestamp(new_reception_timestamp()).res_sync() + SampleUpdater::from(sample).with_timestamp(new_reception_timestamp()).res_sync() } else { sample }; diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index eb6d6e9516..19388ea16f 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -20,7 +20,7 @@ use std::time::Duration; use zenoh::handlers::{locked, DefaultHandler}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; -use zenoh::sample_builder::{SampleBuilder, SampleBuilderTrait}; +use zenoh::sample_builder::{SampleBuilderTrait, SampleUpdater}; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::{new_reception_timestamp, Timestamp}; use zenoh::Result as ZResult; diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index d2eabcdc2a..eb6ef013c7 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -20,6 +20,10 @@ use crate::net::primitives::Primitives; use crate::prelude::*; use crate::sample::QoS; use crate::sample::SourceInfo; +use crate::sample_builder::{ + DeleteSampleBuilder, DeleteSampleBuilderTrait, PutSampleBuilder, PutSampleBuilderTrait, + SampleBuilder, SampleBuilderTrait, +}; use crate::Id; use crate::SessionRef; use crate::Undeclarable; @@ -102,43 +106,6 @@ impl Query { pub fn attachment(&self) -> Option<&Attachment> { self.inner.attachment.as_ref() } - /// Sends a reply in the form of [`Sample`] to this Query. - /// - /// By default, queries only accept replies whose key expression intersects with the query's. - /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]), - /// replying on a disjoint key expression will result in an error when resolving the reply. - /// This api is for internal use only. 
- #[inline(always)] - #[cfg(feature = "unstable")] - #[doc(hidden)] - pub fn reply_sample(&self, sample: Sample) -> ReplyBuilder<'_> { - let Sample { - key_expr, - payload, - kind, - encoding, - timestamp, - qos, - #[cfg(feature = "unstable")] - source_info, - #[cfg(feature = "unstable")] - attachment, - } = sample; - ReplyBuilder { - query: self, - key_expr, - payload, - kind, - encoding, - timestamp, - qos, - #[cfg(feature = "unstable")] - source_info, - #[cfg(feature = "unstable")] - attachment, - } - } - /// Sends a reply to this Query. /// /// By default, queries only accept replies whose key expression intersects with the query's. @@ -154,18 +121,11 @@ impl Query { IntoKeyExpr: Into>, IntoPayload: Into, { + let sample_builder = PutSampleBuilder::new(key_expr, payload) + .with_qos(response::ext::QoSType::RESPONSE.into()); ReplyBuilder { query: self, - key_expr: key_expr.into(), - payload: payload.into(), - kind: SampleKind::Put, - timestamp: None, - encoding: Encoding::default(), - qos: response::ext::QoSType::RESPONSE.into(), - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment: None, + sample_builder, } } /// Sends a error reply to this Query. @@ -187,22 +147,15 @@ impl Query { /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]), /// replying on a disjoint key expression will result in an error when resolving the reply. #[inline(always)] - pub fn reply_del(&self, key_expr: IntoKeyExpr) -> ReplyBuilder<'_> + pub fn reply_del(&self, key_expr: IntoKeyExpr) -> ReplyDelBuilder<'_> where IntoKeyExpr: Into>, { - ReplyBuilder { + let sample_builder = + DeleteSampleBuilder::new(key_expr).with_qos(response::ext::QoSType::RESPONSE.into()); + ReplyDelBuilder { query: self, - key_expr: key_expr.into(), - payload: Payload::empty(), - kind: SampleKind::Delete, - timestamp: None, - encoding: Encoding::default(), - qos: response::ext::QoSType::RESPONSE.into(), - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment: None, + sample_builder, } } @@ -250,45 +203,161 @@ impl fmt::Display for Query { #[derive(Debug)] pub struct ReplyBuilder<'a> { query: &'a Query, - key_expr: KeyExpr<'static>, - payload: Payload, - kind: SampleKind, - encoding: Encoding, - timestamp: Option, - qos: QoS, + sample_builder: PutSampleBuilder, +} + +impl SampleBuilderTrait for ReplyBuilder<'_> { + fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self + where + IntoKeyExpr: Into>, + { + Self { + sample_builder: self.sample_builder.with_keyexpr(key_expr), + ..self + } + } + + fn with_timestamp(self, timestamp: Timestamp) -> Self { + Self { + sample_builder: self.sample_builder.with_timestamp(timestamp), + ..self + } + } + #[cfg(feature = "unstable")] - source_info: SourceInfo, + fn with_source_info(self, source_info: SourceInfo) -> Self { + Self { + sample_builder: self.sample_builder.with_source_info(source_info), + ..self + } + } + #[cfg(feature = "unstable")] - attachment: Option, + fn with_attachment(self, attachment: Attachment) -> Self { + Self { + sample_builder: self.sample_builder.with_attachment(attachment), + ..self + } + } + + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + Self { + sample_builder: self.sample_builder.congestion_control(congestion_control), + ..self + } + } + + fn priority(self, priority: Priority) -> Self { + Self { + sample_builder: self.sample_builder.priority(priority), + ..self + } + } + + fn 
express(self, is_express: bool) -> Self { + Self { + sample_builder: self.sample_builder.express(is_express), + ..self + } + } } -/// A builder returned by [`Query::reply_err()`](Query::reply_err). +impl PutSampleBuilderTrait for ReplyBuilder<'_> { + fn with_encoding(self, encoding: Encoding) -> Self { + Self { + sample_builder: self.sample_builder.with_encoding(encoding), + ..self + } + } + + fn with_payload(self, payload: IntoPayload) -> Self + where + IntoPayload: Into, + { + Self { + sample_builder: self.sample_builder.with_payload(payload), + ..self + } + } +} + +/// A builder returned by [`Query::reply_del()`](Query::reply) #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] -pub struct ReplyErrBuilder<'a> { +pub struct ReplyDelBuilder<'a> { query: &'a Query, - value: Value, + sample_builder: DeleteSampleBuilder, } -impl<'a> ReplyBuilder<'a> { - #[zenoh_macros::unstable] - pub fn with_attachment(mut self, attachment: Attachment) -> Self { - self.attachment = Some(attachment); - self +impl SampleBuilderTrait for ReplyDelBuilder<'_> { + fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self + where + IntoKeyExpr: Into>, + { + Self { + sample_builder: self.sample_builder.with_keyexpr(key_expr), + ..self + } } - #[zenoh_macros::unstable] - pub fn with_source_info(mut self, source_info: SourceInfo) -> Self { - self.source_info = source_info; - self + + fn with_timestamp(self, timestamp: Timestamp) -> Self { + Self { + sample_builder: self.sample_builder.with_timestamp(timestamp), + ..self + } } - pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self { - self.timestamp = Some(timestamp); - self + + #[cfg(feature = "unstable")] + fn with_source_info(self, source_info: SourceInfo) -> Self { + Self { + sample_builder: self.sample_builder.with_source_info(source_info), + ..self + } } - pub fn with_encoding(mut self, encoding: Encoding) -> Self { - self.encoding = encoding; - self + #[cfg(feature = "unstable")] + fn with_attachment(self, attachment: Attachment) -> Self { + Self { + sample_builder: self.sample_builder.with_attachment(attachment), + ..self + } + } + + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + Self { + sample_builder: self.sample_builder.congestion_control(congestion_control), + ..self + } + } + + fn priority(self, priority: Priority) -> Self { + Self { + sample_builder: self.sample_builder.priority(priority), + ..self + } + } + + fn express(self, is_express: bool) -> Self { + Self { + sample_builder: self.sample_builder.express(is_express), + ..self + } + } +} + +impl DeleteSampleBuilderTrait for ReplyDelBuilder<'_> {} + +/// A builder returned by [`Query::reply_err()`](Query::reply_err). 
+#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +#[derive(Debug)] +pub struct ReplyErrBuilder<'a> { + query: &'a Query, + value: Value, +} + +impl AsRef for ReplyBuilder<'_> { + fn as_ref(&self) -> &PutSampleBuilder { + &self.sample_builder } } @@ -298,19 +367,20 @@ impl<'a> Resolvable for ReplyBuilder<'a> { impl SyncResolve for ReplyBuilder<'_> { fn res_sync(self) -> ::To { + let sample = self.sample_builder.res_sync(); if !self.query._accepts_any_replies().unwrap_or(false) - && !self.query.key_expr().intersects(&self.key_expr) + && !self.query.key_expr().intersects(&sample.key_expr) { - bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", self.key_expr, self.query.key_expr()) + bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", sample.key_expr, self.query.key_expr()) } #[allow(unused_mut)] // will be unused if feature = "unstable" is not enabled let mut ext_sinfo = None; #[cfg(feature = "unstable")] { - if self.source_info.source_id.is_some() || self.source_info.source_sn.is_some() { + if sample.source_info.source_id.is_some() || sample.source_info.source_sn.is_some() { ext_sinfo = Some(zenoh::put::ext::SourceInfoType { - id: self.source_info.source_id.unwrap_or_default(), - sn: self.source_info.source_sn.unwrap_or_default() as u32, + id: sample.source_info.source_id.unwrap_or_default(), + sn: sample.source_info.source_sn.unwrap_or_default() as u32, }) } } @@ -318,38 +388,38 @@ impl SyncResolve for ReplyBuilder<'_> { rid: self.query.inner.qid, wire_expr: WireExpr { scope: 0, - suffix: std::borrow::Cow::Owned(self.key_expr.into()), + suffix: std::borrow::Cow::Owned(sample.key_expr.into()), mapping: Mapping::Sender, }, payload: ResponseBody::Reply(zenoh::Reply { consolidation: zenoh::Consolidation::DEFAULT, ext_unknown: vec![], - payload: match self.kind { + payload: match sample.kind { SampleKind::Put => ReplyBody::Put(Put { - timestamp: self.timestamp, - encoding: self.encoding.into(), + timestamp: sample.timestamp, + encoding: sample.encoding.into(), ext_sinfo, #[cfg(feature = "shared-memory")] ext_shm: None, #[cfg(feature = "unstable")] - ext_attachment: self.attachment.map(|a| a.into()), + ext_attachment: sample.attachment.map(|a| a.into()), #[cfg(not(feature = "unstable"))] ext_attachment: None, ext_unknown: vec![], - payload: self.payload.into(), + payload: sample.payload.into(), }), SampleKind::Delete => ReplyBody::Del(Del { - timestamp: self.timestamp, + timestamp: sample.timestamp, ext_sinfo, #[cfg(feature = "unstable")] - ext_attachment: self.attachment.map(|a| a.into()), + ext_attachment: sample.attachment.map(|a| a.into()), #[cfg(not(feature = "unstable"))] ext_attachment: None, ext_unknown: vec![], }), }, }), - ext_qos: self.qos.into(), + ext_qos: sample.qos.into(), ext_tstamp: None, ext_respid: Some(response::ext::ResponderIdType { zid: self.query.inner.zid, diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index a9cffb22d1..fcf3a64182 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -51,52 +51,76 @@ pub trait PutSampleBuilderTrait: SampleBuilderTrait { pub trait DeleteSampleBuilderTrait: SampleBuilderTrait {} +#[derive(Debug)] pub struct SampleBuilder(Sample); +impl SampleBuilder { + pub(crate) fn without_timestamp(self) -> Self { + Self(Sample { + timestamp: None, 
+ ..self.0 + }) + } + pub(crate) fn without_attachment(self) -> Self { + Self(Sample { + attachment: None, + ..self.0 + }) + } +} + impl SampleBuilderTrait for SampleBuilder { fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self where IntoKeyExpr: Into>, { - let mut this = self; - this.0.key_expr = key_expr.into(); - this + Self(Sample { + key_expr: key_expr.into(), + ..self.0 + }) } fn with_timestamp(self, timestamp: Timestamp) -> Self { - let mut this = self; - this.0.timestamp = Some(timestamp); - this + Self(Sample { + timestamp: Some(timestamp), + ..self.0 + }) } #[zenoh_macros::unstable] fn with_source_info(self, source_info: SourceInfo) -> Self { - let mut this = self; - this.0.source_info = source_info; - this + Self(Sample { + source_info, + ..self.0 + }) } #[zenoh_macros::unstable] fn with_attachment(self, attachment: Attachment) -> Self { - let mut this = self; - this.0.attachment = Some(attachment); - this + Self(Sample { + attachment: Some(attachment), + ..self.0 + }) } fn congestion_control(self, congestion_control: CongestionControl) -> Self { - let mut this = self; - this.0.qos = this.0.qos.with_congestion_control(congestion_control); - this + Self(Sample { + qos: self.0.qos.with_congestion_control(congestion_control), + ..self.0 + }) } fn priority(self, priority: Priority) -> Self { - let mut this = self; - this.0.qos = this.0.qos.with_priority(priority); - this + Self(Sample { + qos: self.0.qos.with_priority(priority), + ..self.0 + }) } fn express(self, is_express: bool) -> Self { - let mut this = self; - this.0.qos = this.0.qos.with_express(is_express); - this + Self(Sample { + qos: self.0.qos.with_express(is_express), + ..self.0 + }) } } +#[derive(Debug)] pub struct PutSampleBuilder(SampleBuilder); impl PutSampleBuilder { @@ -118,15 +142,17 @@ impl PutSampleBuilder { attachment: None, })) } + #[zenoh_macros::unstable] pub fn without_timestamp(self) -> Self { - let mut this = self; - this.0 .0.timestamp = None; - this + Self(self.0.without_timestamp()) } + #[zenoh_macros::unstable] pub fn without_attachment(self) -> Self { - let mut this = self; - this.0 .0.attachment = None; - this + Self(self.0.without_attachment()) + } + // It's convenient to set QoS as a whole for internal usage. For user API there are `congestion_control`, `priority` and `express` methods. + pub(crate) fn with_qos(self, qos: QoS) -> Self { + Self(SampleBuilder(Sample { qos, ..self.0 .0 })) } } @@ -161,20 +187,23 @@ impl SampleBuilderTrait for PutSampleBuilder { impl PutSampleBuilderTrait for PutSampleBuilder { fn with_encoding(self, encoding: Encoding) -> Self { - let mut this = self; - this.0 .0.encoding = encoding; - this + Self(SampleBuilder(Sample { + encoding, + ..self.0 .0 + })) } fn with_payload(self, payload: IntoPayload) -> Self where IntoPayload: Into, { - let mut this = self; - this.0 .0.payload = payload.into(); - this + Self(SampleBuilder(Sample { + payload: payload.into(), + ..self.0 .0 + })) } } +#[derive(Debug)] pub struct DeleteSampleBuilder(SampleBuilder); impl DeleteSampleBuilder { @@ -195,6 +224,10 @@ impl DeleteSampleBuilder { attachment: None, })) } + // It's convenient to set QoS as a whole for internal usage. For user API there are `congestion_control`, `priority` and `express` methods. 
+ pub(crate) fn with_qos(self, qos: QoS) -> Self { + Self(SampleBuilder(Sample { qos, ..self.0 .0 })) + } } impl SampleBuilderTrait for DeleteSampleBuilder { From 0992ff8812df04e4b0dc9acc01a45763739d0792 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sat, 23 Mar 2024 22:44:36 +0100 Subject: [PATCH 034/357] replybuilder unfinished --- zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh/src/net/runtime/adminspace.rs | 1 + zenoh/src/queryable.rs | 38 ++++++++++++++++++---------- 3 files changed, 26 insertions(+), 15 deletions(-) diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 19388ea16f..eb6d6e9516 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -20,7 +20,7 @@ use std::time::Duration; use zenoh::handlers::{locked, DefaultHandler}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; -use zenoh::sample_builder::{SampleBuilderTrait, SampleUpdater}; +use zenoh::sample_builder::{SampleBuilder, SampleBuilderTrait}; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::{new_reception_timestamp, Timestamp}; use zenoh::Result as ZResult; diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 29106cb89d..01f29ba19b 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -20,6 +20,7 @@ use crate::plugins::sealed::{self as plugins}; use crate::prelude::sync::SyncResolve; use crate::queryable::Query; use crate::queryable::QueryInner; +use crate::sample_builder::PutSampleBuilderTrait; use crate::value::Value; use async_std::task; use log::{error, trace}; diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index eb6ef013c7..a5b6deca4c 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -18,11 +18,10 @@ use crate::encoding::Encoding; use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; -use crate::sample::QoS; use crate::sample::SourceInfo; use crate::sample_builder::{ DeleteSampleBuilder, DeleteSampleBuilderTrait, PutSampleBuilder, PutSampleBuilderTrait, - SampleBuilder, SampleBuilderTrait, + SampleBuilderTrait, }; use crate::Id; use crate::SessionRef; @@ -355,23 +354,34 @@ pub struct ReplyErrBuilder<'a> { value: Value, } -impl AsRef for ReplyBuilder<'_> { - fn as_ref(&self) -> &PutSampleBuilder { - &self.sample_builder +impl<'a> Resolvable for ReplyBuilder<'a> { + type To = ZResult<()>; +} + +impl SyncResolve for ReplyBuilder<'_> { + fn res_sync(self) -> ::To { + let sample = self.sample_builder.res_sync(); + self.query._reply_sample(sample) } } -impl<'a> Resolvable for ReplyBuilder<'a> { +impl<'a> Resolvable for ReplyDelBuilder<'a> { type To = ZResult<()>; } -impl SyncResolve for ReplyBuilder<'_> { +impl SyncResolve for ReplyDelBuilder<'_> { fn res_sync(self) -> ::To { let sample = self.sample_builder.res_sync(); - if !self.query._accepts_any_replies().unwrap_or(false) - && !self.query.key_expr().intersects(&sample.key_expr) + self.query._reply_sample(sample) + } +} + +impl Query { + fn _reply_sample(&self, sample: Sample) -> ZResult<()> { + if !self._accepts_any_replies().unwrap_or(false) + && !self.key_expr().intersects(&sample.key_expr) { - bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", sample.key_expr, self.query.key_expr()) + bail!("Attempted to reply on `{}`, which does not intersect with query 
`{}`, despite query only allowing replies on matching key expressions", sample.key_expr, self.key_expr()) } #[allow(unused_mut)] // will be unused if feature = "unstable" is not enabled let mut ext_sinfo = None; @@ -384,8 +394,8 @@ impl SyncResolve for ReplyBuilder<'_> { }) } } - self.query.inner.primitives.send_response(Response { - rid: self.query.inner.qid, + self.inner.primitives.send_response(Response { + rid: self.inner.qid, wire_expr: WireExpr { scope: 0, suffix: std::borrow::Cow::Owned(sample.key_expr.into()), @@ -422,8 +432,8 @@ impl SyncResolve for ReplyBuilder<'_> { ext_qos: sample.qos.into(), ext_tstamp: None, ext_respid: Some(response::ext::ResponderIdType { - zid: self.query.inner.zid, - eid: self.query.eid, + zid: self.inner.zid, + eid: self.eid, }), }); Ok(()) From 62378ad1805d3e13db06664f1176ca0f89393fe2 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 24 Mar 2024 11:21:32 +0100 Subject: [PATCH 035/357] new reply_sample --- zenoh/src/queryable.rs | 102 +++++++++++++++++++++++++++++++++++- zenoh/src/sample_builder.rs | 43 ++++++++++++++- 2 files changed, 141 insertions(+), 4 deletions(-) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index a5b6deca4c..d0b80e9a11 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -21,7 +21,7 @@ use crate::prelude::*; use crate::sample::SourceInfo; use crate::sample_builder::{ DeleteSampleBuilder, DeleteSampleBuilderTrait, PutSampleBuilder, PutSampleBuilderTrait, - SampleBuilderTrait, + SampleBuilder, SampleBuilderTrait, }; use crate::Id; use crate::SessionRef; @@ -105,6 +105,24 @@ impl Query { pub fn attachment(&self) -> Option<&Attachment> { self.inner.attachment.as_ref() } + + /// Sends a reply or delete reply to this Query + /// + /// This function is useful when resending the samples which can be of [`SampleKind::Put`] or [`SampleKind::Delete`] + /// It allows to build the reply with same common parameters, like timestamp, attachment, source_info, etc. + /// and only on final step to choose the kind of reply by calling [`ReplySampleBuilder::put`] or [`ReplySampleBuilder::delete`] methods. + #[inline(always)] + pub fn reply_sample(&self, key_expr: IntoKeyExpr) -> ReplySampleBuilder + where + IntoKeyExpr: Into>, + { + let sample_builder = SampleBuilder::new(key_expr); + ReplySampleBuilder { + query: self, + sample_builder, + } + } + /// Sends a reply to this Query. /// /// By default, queries only accept replies whose key expression intersects with the query's. @@ -197,7 +215,87 @@ impl fmt::Display for Query { } } -/// A builder returned by [`Query::reply()`](Query::reply) or [`Query::reply()`](Query::reply). 
+pub struct ReplySampleBuilder<'a> { + query: &'a Query, + sample_builder: SampleBuilder, +} + +impl<'a> ReplySampleBuilder<'a> { + pub fn put(self, payload: IntoPayload) -> ReplyBuilder<'a> + where + IntoPayload: Into, + { + let builder = ReplyBuilder { + query: self.query, + sample_builder: self.sample_builder.into(), + }; + builder.with_payload(payload) + } + pub fn delete(self) -> ReplyDelBuilder<'a> { + ReplyDelBuilder { + query: self.query, + sample_builder: self.sample_builder.into(), + } + } +} + +impl SampleBuilderTrait for ReplySampleBuilder<'_> { + fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self + where + IntoKeyExpr: Into>, + { + Self { + sample_builder: self.sample_builder.with_keyexpr(key_expr), + ..self + } + } + + fn with_timestamp(self, timestamp: Timestamp) -> Self { + Self { + sample_builder: self.sample_builder.with_timestamp(timestamp), + ..self + } + } + + #[cfg(feature = "unstable")] + fn with_source_info(self, source_info: SourceInfo) -> Self { + Self { + sample_builder: self.sample_builder.with_source_info(source_info), + ..self + } + } + + #[cfg(feature = "unstable")] + fn with_attachment(self, attachment: Attachment) -> Self { + Self { + sample_builder: self.sample_builder.with_attachment(attachment), + ..self + } + } + + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + Self { + sample_builder: self.sample_builder.congestion_control(congestion_control), + ..self + } + } + + fn priority(self, priority: Priority) -> Self { + Self { + sample_builder: self.sample_builder.priority(priority), + ..self + } + } + + fn express(self, is_express: bool) -> Self { + Self { + sample_builder: self.sample_builder.express(is_express), + ..self + } + } +} + +/// A builder returned by [`Query::reply()`](Query::reply) #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] pub struct ReplyBuilder<'a> { diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index fcf3a64182..61e4bf81fb 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -55,13 +55,30 @@ pub trait DeleteSampleBuilderTrait: SampleBuilderTrait {} pub struct SampleBuilder(Sample); impl SampleBuilder { - pub(crate) fn without_timestamp(self) -> Self { + pub fn new(key_expr: IntoKeyExpr) -> Self + where + IntoKeyExpr: Into>, + { + Self(Sample { + key_expr: key_expr.into(), + payload: Payload::empty(), + kind: SampleKind::default(), + encoding: Encoding::default(), + timestamp: None, + qos: QoS::default(), + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment: None, + }) + } + pub fn without_timestamp(self) -> Self { Self(Sample { timestamp: None, ..self.0 }) } - pub(crate) fn without_attachment(self) -> Self { + pub fn without_attachment(self) -> Self { Self(Sample { attachment: None, ..self.0 @@ -123,6 +140,17 @@ impl SampleBuilderTrait for SampleBuilder { #[derive(Debug)] pub struct PutSampleBuilder(SampleBuilder); +impl From for PutSampleBuilder { + fn from(sample_builder: SampleBuilder) -> Self { + Self(SampleBuilder { + 0: Sample { + kind: SampleKind::Put, + ..sample_builder.0 + }, + }) + } +} + impl PutSampleBuilder { pub fn new(key_expr: IntoKeyExpr, payload: IntoPayload) -> Self where @@ -206,6 +234,17 @@ impl PutSampleBuilderTrait for PutSampleBuilder { #[derive(Debug)] pub struct DeleteSampleBuilder(SampleBuilder); +impl From for DeleteSampleBuilder { + fn from(sample_builder: SampleBuilder) 
-> Self { + Self(SampleBuilder { + 0: Sample { + kind: SampleKind::Delete, + ..sample_builder.0 + }, + }) + } +} + impl DeleteSampleBuilder { pub fn new(key_expr: IntoKeyExpr) -> Self where From cc580a5dd4a30409b12ab4ae7c5a81d0b9d5ab1d Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 24 Mar 2024 13:08:10 +0100 Subject: [PATCH 036/357] sample decompose, opt setters --- zenoh/src/queryable.rs | 45 +++++++++++++++++++++++++++++ zenoh/src/sample.rs | 35 +++++++++++++++++++++++ zenoh/src/sample_builder.rs | 56 +++++++++++++++++++++---------------- 3 files changed, 112 insertions(+), 24 deletions(-) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index d0b80e9a11..7d4a0903c2 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -250,6 +250,13 @@ impl SampleBuilderTrait for ReplySampleBuilder<'_> { } } + fn with_timestamp_opt(self, timestamp: Option) -> Self { + Self { + sample_builder: self.sample_builder.with_timestamp_opt(timestamp), + ..self + } + } + fn with_timestamp(self, timestamp: Timestamp) -> Self { Self { sample_builder: self.sample_builder.with_timestamp(timestamp), @@ -265,6 +272,14 @@ impl SampleBuilderTrait for ReplySampleBuilder<'_> { } } + #[cfg(feature = "unstable")] + fn with_attachment_opt(self, attachment: Option) -> Self { + Self { + sample_builder: self.sample_builder.with_attachment_opt(attachment), + ..self + } + } + #[cfg(feature = "unstable")] fn with_attachment(self, attachment: Attachment) -> Self { Self { @@ -314,6 +329,13 @@ impl SampleBuilderTrait for ReplyBuilder<'_> { } } + fn with_timestamp_opt(self, timestamp: Option) -> Self { + Self { + sample_builder: self.sample_builder.with_timestamp_opt(timestamp), + ..self + } + } + fn with_timestamp(self, timestamp: Timestamp) -> Self { Self { sample_builder: self.sample_builder.with_timestamp(timestamp), @@ -329,6 +351,14 @@ impl SampleBuilderTrait for ReplyBuilder<'_> { } } + #[cfg(feature = "unstable")] + fn with_attachment_opt(self, attachment: Option) -> Self { + Self { + sample_builder: self.sample_builder.with_attachment_opt(attachment), + ..self + } + } + #[cfg(feature = "unstable")] fn with_attachment(self, attachment: Attachment) -> Self { Self { @@ -397,6 +427,13 @@ impl SampleBuilderTrait for ReplyDelBuilder<'_> { } } + fn with_timestamp_opt(self, timestamp: Option) -> Self { + Self { + sample_builder: self.sample_builder.with_timestamp_opt(timestamp), + ..self + } + } + fn with_timestamp(self, timestamp: Timestamp) -> Self { Self { sample_builder: self.sample_builder.with_timestamp(timestamp), @@ -412,6 +449,14 @@ impl SampleBuilderTrait for ReplyDelBuilder<'_> { } } + #[cfg(feature = "unstable")] + fn with_attachment_opt(self, attachment: Option) -> Self { + Self { + sample_builder: self.sample_builder.with_attachment_opt(attachment), + ..self + } + } + #[cfg(feature = "unstable")] fn with_attachment(self, attachment: Attachment) -> Self { Self { diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 2c98d5ead1..3ac3087836 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -433,6 +433,41 @@ impl TryFrom for SampleKind { #[zenoh_macros::unstable] pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; +/// Structure with public fields for sample. It's convenient if it's necessary to decompose a sample into its fields. 
+pub struct SampleFields { + pub key_expr: KeyExpr<'static>, + pub payload: Payload, + pub kind: SampleKind, + pub encoding: Encoding, + pub timestamp: Option, + pub express: bool, + pub priority: Priority, + pub congestion_control: CongestionControl, + #[cfg(feature = "unstable")] + pub source_info: SourceInfo, + #[cfg(feature = "unstable")] + pub attachment: Option, +} + +impl From for SampleFields { + fn from(sample: Sample) -> Self { + SampleFields { + key_expr: sample.key_expr, + payload: sample.payload, + kind: sample.kind, + encoding: sample.encoding, + timestamp: sample.timestamp, + express: sample.qos.express(), + priority: sample.qos.priority(), + congestion_control: sample.qos.congestion_control(), + #[cfg(feature = "unstable")] + source_info: sample.source_info, + #[cfg(feature = "unstable")] + attachment: sample.attachment, + } + } +} + /// A zenoh sample. #[non_exhaustive] #[derive(Clone, Debug)] diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index 61e4bf81fb..c0ebf8c9d0 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -32,10 +32,13 @@ pub trait SampleBuilderTrait { fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self where IntoKeyExpr: Into>; + fn with_timestamp_opt(self, timestamp: Option) -> Self; fn with_timestamp(self, timestamp: Timestamp) -> Self; #[zenoh_macros::unstable] fn with_source_info(self, source_info: SourceInfo) -> Self; #[zenoh_macros::unstable] + fn with_attachment_opt(self, attachment: Option) -> Self; + #[zenoh_macros::unstable] fn with_attachment(self, attachment: Attachment) -> Self; fn congestion_control(self, congestion_control: CongestionControl) -> Self; fn priority(self, priority: Priority) -> Self; @@ -72,18 +75,6 @@ impl SampleBuilder { attachment: None, }) } - pub fn without_timestamp(self) -> Self { - Self(Sample { - timestamp: None, - ..self.0 - }) - } - pub fn without_attachment(self) -> Self { - Self(Sample { - attachment: None, - ..self.0 - }) - } } impl SampleBuilderTrait for SampleBuilder { @@ -97,12 +88,17 @@ impl SampleBuilderTrait for SampleBuilder { }) } - fn with_timestamp(self, timestamp: Timestamp) -> Self { + fn with_timestamp_opt(self, timestamp: Option) -> Self { Self(Sample { - timestamp: Some(timestamp), + timestamp, ..self.0 }) } + + fn with_timestamp(self, timestamp: Timestamp) -> Self { + self.with_timestamp_opt(Some(timestamp)) + } + #[zenoh_macros::unstable] fn with_source_info(self, source_info: SourceInfo) -> Self { Self(Sample { @@ -110,13 +106,19 @@ impl SampleBuilderTrait for SampleBuilder { ..self.0 }) } + #[zenoh_macros::unstable] - fn with_attachment(self, attachment: Attachment) -> Self { + fn with_attachment_opt(self, attachment: Option) -> Self { Self(Sample { - attachment: Some(attachment), + attachment, ..self.0 }) } + + #[zenoh_macros::unstable] + fn with_attachment(self, attachment: Attachment) -> Self { + self.with_attachment_opt(Some(attachment)) + } fn congestion_control(self, congestion_control: CongestionControl) -> Self { Self(Sample { qos: self.0.qos.with_congestion_control(congestion_control), @@ -170,14 +172,6 @@ impl PutSampleBuilder { attachment: None, })) } - #[zenoh_macros::unstable] - pub fn without_timestamp(self) -> Self { - Self(self.0.without_timestamp()) - } - #[zenoh_macros::unstable] - pub fn without_attachment(self) -> Self { - Self(self.0.without_attachment()) - } // It's convenient to set QoS as a whole for internal usage. For user API there are `congestion_control`, `priority` and `express` methods. 
pub(crate) fn with_qos(self, qos: QoS) -> Self { Self(SampleBuilder(Sample { qos, ..self.0 .0 })) @@ -194,6 +188,9 @@ impl SampleBuilderTrait for PutSampleBuilder { fn with_timestamp(self, timestamp: Timestamp) -> Self { Self(self.0.with_timestamp(timestamp)) } + fn with_timestamp_opt(self, timestamp: Option) -> Self { + Self(self.0.with_timestamp_opt(timestamp)) + } #[zenoh_macros::unstable] fn with_source_info(self, source_info: SourceInfo) -> Self { Self(self.0.with_source_info(source_info)) @@ -202,6 +199,10 @@ impl SampleBuilderTrait for PutSampleBuilder { fn with_attachment(self, attachment: Attachment) -> Self { Self(self.0.with_attachment(attachment)) } + #[zenoh_macros::unstable] + fn with_attachment_opt(self, attachment: Option) -> Self { + Self(self.0.with_attachment_opt(attachment)) + } fn congestion_control(self, congestion_control: CongestionControl) -> Self { Self(self.0.congestion_control(congestion_control)) } @@ -279,6 +280,9 @@ impl SampleBuilderTrait for DeleteSampleBuilder { fn with_timestamp(self, timestamp: Timestamp) -> Self { Self(self.0.with_timestamp(timestamp)) } + fn with_timestamp_opt(self, timestamp: Option) -> Self { + Self(self.0.with_timestamp_opt(timestamp)) + } #[zenoh_macros::unstable] fn with_source_info(self, source_info: SourceInfo) -> Self { Self(self.0.with_source_info(source_info)) @@ -287,6 +291,10 @@ impl SampleBuilderTrait for DeleteSampleBuilder { fn with_attachment(self, attachment: Attachment) -> Self { Self(self.0.with_attachment(attachment)) } + #[zenoh_macros::unstable] + fn with_attachment_opt(self, attachment: Option) -> Self { + Self(self.0.with_attachment_opt(attachment)) + } fn congestion_control(self, congestion_control: CongestionControl) -> Self { Self(self.0.congestion_control(congestion_control)) } From 270840247c72238654be20f611c4d4cb6338cfc4 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 24 Mar 2024 15:38:36 +0100 Subject: [PATCH 037/357] samples, plugins updated --- plugins/zenoh-plugin-example/src/lib.rs | 13 +++++++++- .../src/replica/align_queryable.rs | 2 ++ .../src/replica/storage.rs | 4 +-- zenoh-ext/src/publication_cache.rs | 25 ++++++++++++++----- zenoh/src/queryable.rs | 24 ++++++++++++------ 5 files changed, 51 insertions(+), 17 deletions(-) diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 04f49b4739..9b9dda40de 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -24,6 +24,7 @@ use std::sync::{ use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::runtime::Runtime; +use zenoh::sample_builder::SampleBuilderTrait; use zenoh_core::zlock; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; use zenoh_result::{bail, ZResult}; @@ -174,7 +175,17 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { info!("Handling query '{}'", query.selector()); for (key_expr, sample) in stored.iter() { if query.selector().key_expr.intersects(unsafe{keyexpr::from_str_unchecked(key_expr)}) { - query.reply_sample(sample.clone()).res().await.unwrap(); + let reply = query + .reply_sample(sample.key_expr().clone().into_owned()) + .with_timestamp_opt(sample.timestamp().cloned()); + #[cfg(feature = "unstable")] + let reply = reply + .with_attachment_opt(sample.attachment()) + .with_source_info(sample.source_info()); + match sample.kind() { + SampleKind::Put => reply.put(sample.payload().clone()).res().await.unwrap(), + SampleKind::Delete => 
reply.delete().res().await.unwrap(), + } } } } diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 32be4a5534..691fabd7a7 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -20,6 +20,8 @@ use std::str; use std::str::FromStr; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; +use zenoh::sample_builder::PutSampleBuilderTrait; +use zenoh::sample_builder::SampleBuilderTrait; use zenoh::time::Timestamp; use zenoh::Session; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 1aadc88611..95af3c97a2 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -24,7 +24,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::ZBuf; use zenoh::prelude::r#async::*; use zenoh::query::ConsolidationMode; -use zenoh::sample_builder::{SampleBuilderTrait, SampleUpdater}; +use zenoh::sample_builder::{PutSampleBuilderTrait, SampleBuilder, SampleBuilderTrait}; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::{Result as ZResult, Session}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; @@ -368,7 +368,7 @@ impl StorageService { } }; let sample = if sample.timestamp().is_none() { - SampleUpdater::from(sample).with_timestamp(new_reception_timestamp()).res_sync() + SampleBuilder::from(sample).with_timestamp(new_reception_timestamp()).res_sync() } else { sample }; diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 85cb96cce2..78fff32014 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -20,6 +20,7 @@ use std::convert::TryInto; use std::future::Ready; use zenoh::prelude::r#async::*; use zenoh::queryable::{Query, Queryable}; +use zenoh::sample_builder::SampleBuilderTrait; use zenoh::subscriber::FlumeSubscriber; use zenoh::SessionRef; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; @@ -116,6 +117,22 @@ pub struct PublicationCache<'a> { _stoptx: Sender, } +async fn reply_sample(query: &Query, sample: &Sample) { + let reply = query + .reply_sample(sample.key_expr().clone().into_owned()) + .with_timestamp_opt(sample.timestamp().cloned()); + #[cfg(feature = "unstable")] + let reply = reply + .with_attachment_opt(sample.attachment()) + .with_source_info(sample.source_info()); + if let Err(e) = match sample.kind() { + SampleKind::Put => reply.put(sample.payload().clone()).res_async().await, + SampleKind::Delete => reply.delete().res_async().await, + } { + log::warn!("Error replying to query: {}", e); + } +} + impl<'a> PublicationCache<'a> { fn new(conf: PublicationCacheBuilder<'a, '_, '_>) -> ZResult> { let key_expr = conf.pub_key_expr?; @@ -212,9 +229,7 @@ impl<'a> PublicationCache<'a> { continue; } } - if let Err(e) = query.reply_sample(sample.clone()).res_async().await { - log::warn!("Error replying to query: {}", e); - } + reply_sample(&query, sample).await; } } } else { @@ -226,9 +241,7 @@ impl<'a> PublicationCache<'a> { continue; } } - if let Err(e) = query.reply_sample(sample.clone()).res_async().await { - log::warn!("Error replying to query: {}", e); - } + reply_sample(&query, sample).await; } } } diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 
7d4a0903c2..f2e00e47c6 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -519,6 +519,22 @@ impl SyncResolve for ReplyDelBuilder<'_> { } } +impl<'a> AsyncResolve for ReplyBuilder<'a> { + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} + +impl<'a> AsyncResolve for ReplyDelBuilder<'a> { + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} + impl Query { fn _reply_sample(&self, sample: Sample) -> ZResult<()> { if !self._accepts_any_replies().unwrap_or(false) @@ -583,14 +599,6 @@ impl Query { } } -impl<'a> AsyncResolve for ReplyBuilder<'a> { - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - impl<'a> Resolvable for ReplyErrBuilder<'a> { type To = ZResult<()>; } From b80fd0aa30842e607ae661547368df7f818f3a29 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 24 Mar 2024 18:58:55 +0100 Subject: [PATCH 038/357] interceptors removed from plugin storage API --- plugins/zenoh-backend-example/src/lib.rs | 13 +--- plugins/zenoh-backend-traits/src/lib.rs | 68 +------------------ .../src/backends_mgt.rs | 7 -- .../zenoh-plugin-storage-manager/src/lib.rs | 4 -- .../src/memory_backend/mod.rs | 20 ------ .../src/replica/storage.rs | 23 ------- 6 files changed, 3 insertions(+), 132 deletions(-) diff --git a/plugins/zenoh-backend-example/src/lib.rs b/plugins/zenoh-backend-example/src/lib.rs index 602d29f375..f81231a498 100644 --- a/plugins/zenoh-backend-example/src/lib.rs +++ b/plugins/zenoh-backend-example/src/lib.rs @@ -13,11 +13,8 @@ // use async_std::sync::RwLock; use async_trait::async_trait; -use std::{ - collections::{hash_map::Entry, HashMap}, - sync::Arc, -}; -use zenoh::{prelude::OwnedKeyExpr, sample::Sample, time::Timestamp, value::Value}; +use std::collections::{hash_map::Entry, HashMap}; +use zenoh::{prelude::OwnedKeyExpr, time::Timestamp, value::Value}; use zenoh_backend_traits::{ config::{StorageConfig, VolumeConfig}, Capability, History, Persistence, Storage, StorageInsertionResult, StoredData, Volume, @@ -71,12 +68,6 @@ impl Volume for ExampleBackend { async fn create_storage(&self, _props: StorageConfig) -> ZResult> { Ok(Box::::default()) } - fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>> { - None - } - fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>> { - None - } } #[async_trait] diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index d17e6dfd77..40d022f1ec 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -68,16 +68,6 @@ //! // The properties are the ones passed via a PUT in the admin space for Storage creation. //! Ok(Box::new(MyStorage::new(properties).await?)) //! } -//! -//! fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>> { -//! // No interception point for incoming data (on PUT operations) -//! None -//! } -//! -//! fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>> { -//! // No interception point for outgoing data (on GET operations) -//! None -//! } //! } //! //! 
// Your Storage implementation @@ -135,9 +125,7 @@ use async_trait::async_trait; use const_format::concatcp; -use std::sync::Arc; -use zenoh::prelude::{KeyExpr, OwnedKeyExpr, Sample, Selector}; -use zenoh::queryable::ReplyBuilder; +use zenoh::prelude::OwnedKeyExpr; use zenoh::time::Timestamp; use zenoh::value::Value; pub use zenoh::Result as ZResult; @@ -210,14 +198,6 @@ pub trait Volume: Send + Sync { /// Creates a storage configured with some properties. async fn create_storage(&self, props: StorageConfig) -> ZResult>; - - /// Returns an interceptor that will be called before pushing any data - /// into a storage created by this backend. `None` can be returned for no interception point. - fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>>; - - /// Returns an interceptor that will be called before sending any reply - /// to a query from a storage created by this backend. `None` can be returned for no interception point. - fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>>; } pub type VolumeInstance = Box; @@ -282,49 +262,3 @@ pub trait Storage: Send + Sync { /// Remember to fetch the entry corresponding to the `None` key async fn get_all_entries(&self) -> ZResult, Timestamp)>>; } - -/// A wrapper around the [`zenoh::queryable::Query`] allowing to call the -/// OutgoingDataInterceptor (if any) before to send the reply -pub struct Query { - q: zenoh::queryable::Query, - interceptor: Option Sample + Send + Sync>>, -} - -impl Query { - pub fn new( - q: zenoh::queryable::Query, - interceptor: Option Sample + Send + Sync>>, - ) -> Query { - Query { q, interceptor } - } - - /// The full [`Selector`] of this Query. - #[inline(always)] - pub fn selector(&self) -> Selector<'_> { - self.q.selector() - } - - /// The key selector part of this Query. - #[inline(always)] - pub fn key_expr(&self) -> &KeyExpr<'static> { - self.q.key_expr() - } - - /// This Query's selector parameters. 
- #[inline(always)] - pub fn parameters(&self) -> &str { - self.q.parameters() - } - - /// Sends a Sample as a reply to this Query - pub fn reply(&self, sample: Sample) -> ReplyBuilder<'_> { - // Call outgoing intercerceptor - let sample = if let Some(ref interceptor) = self.interceptor { - interceptor(sample) - } else { - sample - }; - // Send reply - self.q.reply_sample(sample) - } -} diff --git a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs index aa7260e868..90a6ae6250 100644 --- a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs @@ -14,7 +14,6 @@ use super::storages_mgt::*; use flume::Sender; use std::sync::Arc; -use zenoh::prelude::r#async::*; use zenoh::Session; use zenoh_backend_traits::config::StorageConfig; use zenoh_backend_traits::{Capability, VolumeInstance}; @@ -23,16 +22,12 @@ use zenoh_result::ZResult; pub struct StoreIntercept { pub storage: Box, pub capability: Capability, - pub in_interceptor: Option Sample + Send + Sync>>, - pub out_interceptor: Option Sample + Send + Sync>>, } pub(crate) async fn create_and_start_storage( admin_key: String, config: StorageConfig, backend: &VolumeInstance, - in_interceptor: Option Sample + Send + Sync>>, - out_interceptor: Option Sample + Send + Sync>>, zenoh: Arc, ) -> ZResult> { log::trace!("Create storage '{}'", &admin_key); @@ -41,8 +36,6 @@ pub(crate) async fn create_and_start_storage( let store_intercept = StoreIntercept { storage, capability, - in_interceptor, - out_interceptor, }; start_storage(store_intercept, config, admin_key, zenoh).await diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 0db30bbd6a..91df2f108d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -239,14 +239,10 @@ impl StorageRuntimeInner { volume_id, backend.name() ); - let in_interceptor = backend.instance().incoming_data_interceptor(); - let out_interceptor = backend.instance().outgoing_data_interceptor(); let stopper = async_std::task::block_on(create_and_start_storage( admin_key, storage.clone(), backend.instance(), - in_interceptor, - out_interceptor, self.session.clone(), ))?; self.storages diff --git a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs index ebb4922c9d..4e333b8592 100644 --- a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs @@ -61,26 +61,6 @@ impl Volume for MemoryBackend { log::debug!("Create Memory Storage with configuration: {:?}", properties); Ok(Box::new(MemoryStorage::new(properties).await?)) } - - fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>> { - // By default: no interception point - None - // To test interceptors, uncomment this line: - // Some(Arc::new(|sample| { - // trace!(">>>> IN INTERCEPTOR FOR {:?}", sample); - // sample - // })) - } - - fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>> { - // By default: no interception point - None - // To test interceptors, uncomment this line: - // Some(Arc::new(|sample| { - // trace!("<<<< OUT INTERCEPTOR FOR {:?}", sample); - // sample - // })) - } } impl Drop for MemoryBackend { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs 
index 0708dcabd9..35134dfe43 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -60,8 +60,6 @@ pub struct StorageService { capability: Capability, tombstones: Arc>>, wildcard_updates: Arc>>, - in_interceptor: Option Sample + Send + Sync>>, - out_interceptor: Option Sample + Send + Sync>>, replication: Option, } @@ -85,8 +83,6 @@ impl StorageService { capability: store_intercept.capability, tombstones: Arc::new(RwLock::new(KeBoxTree::default())), wildcard_updates: Arc::new(RwLock::new(KeBoxTree::default())), - in_interceptor: store_intercept.in_interceptor, - out_interceptor: store_intercept.out_interceptor, replication, }; if storage_service @@ -263,13 +259,6 @@ impl StorageService { // the trimming during PUT and GET should be handled by the plugin async fn process_sample(&self, sample: Sample) { log::trace!("[STORAGE] Processing sample: {:?}", sample); - // Call incoming data interceptor (if any) - let sample = if let Some(ref interceptor) = self.in_interceptor { - interceptor(sample) - } else { - sample - }; - // if wildcard, update wildcard_updates if sample.key_expr().is_wild() { self.register_wildcard_update(sample.clone()).await; @@ -523,12 +512,6 @@ impl StorageService { let sample = Sample::new(key.clone(), payload) .with_encoding(encoding) .with_timestamp(entry.timestamp); - // apply outgoing interceptor on results - let sample = if let Some(ref interceptor) = self.out_interceptor { - interceptor(sample) - } else { - sample - }; if let Err(e) = q.reply_sample(sample).res().await { log::warn!( "Storage '{}' raised an error replying a query: {}", @@ -561,12 +544,6 @@ impl StorageService { let sample = Sample::new(q.key_expr().clone(), payload) .with_encoding(encoding) .with_timestamp(entry.timestamp); - // apply outgoing interceptor on results - let sample = if let Some(ref interceptor) = self.out_interceptor { - interceptor(sample) - } else { - sample - }; if let Err(e) = q.reply_sample(sample).res().await { log::warn!( "Storage '{}' raised an error replying a query: {}", From 7d2abd44b19ed7ba86713f1752990ba344d07235 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 24 Mar 2024 19:20:11 +0100 Subject: [PATCH 039/357] deconstruct sample api used --- .../src/replica/aligner.rs | 3 +-- .../src/replica/storage.rs | 25 +++++++++++-------- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 3f672382f1..f00029442f 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -12,11 +12,10 @@ // ZettaScale Zenoh Team, // -use crate::replica::storage::StorageSampleKind; - use super::storage::StorageSample; use super::{Digest, EraType, LogEntry, Snapshotter}; use super::{CONTENTS, ERA, INTERVALS, SUBINTERVALS}; +use crate::replica::storage::StorageSampleKind; use async_std::sync::{Arc, RwLock}; use flume::{Receiver, Sender}; use std::collections::{HashMap, HashSet}; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 95af3c97a2..fbc734d716 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -24,6 +24,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::ZBuf; use zenoh::prelude::r#async::*; 
use zenoh::query::ConsolidationMode; +use zenoh::sample::SampleFields; use zenoh::sample_builder::{PutSampleBuilderTrait, SampleBuilder, SampleBuilderTrait}; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::{Result as ZResult, Session}; @@ -55,19 +56,23 @@ pub struct StorageSample { impl From for StorageSample { fn from(sample: Sample) -> Self { - let timestamp = *sample.timestamp().unwrap_or(&new_reception_timestamp()); - // TODO: add API for disassembly of Sample - let key_expr = sample.key_expr().clone(); - let payload = sample.payload().clone(); - let encoding = sample.encoding().clone(); - let kind = match sample.kind() { - SampleKind::Put => StorageSampleKind::Put(Value::new(payload).with_encoding(encoding)), - SampleKind::Delete => StorageSampleKind::Delete, - }; - StorageSample { + let SampleFields { key_expr, timestamp, kind, + payload, + encoding, + .. + } = sample.into(); + StorageSample { + key_expr, + timestamp: timestamp.unwrap_or(new_reception_timestamp()), + kind: match kind { + SampleKind::Put => { + StorageSampleKind::Put(Value::new(payload).with_encoding(encoding)) + } + SampleKind::Delete => StorageSampleKind::Delete, + }, } } } From 2b1071f9b9b06dd10d401969ce5c8678560aea03 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 24 Mar 2024 19:52:30 +0100 Subject: [PATCH 040/357] comment, clippy fix --- zenoh/src/sample.rs | 4 ++-- zenoh/src/sample_builder.rs | 20 ++++++++------------ 2 files changed, 10 insertions(+), 14 deletions(-) diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 3ac3087836..acf8536a0e 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -67,10 +67,10 @@ pub(crate) trait DataInfoIntoSample { } impl DataInfoIntoSample for DataInfo { - // TODO: this is internal function. + // This function is for internal use only. // Technically it may create invalid sample (e.g. a delete sample with a payload and encoding) // The test for it is intentionally not added to avoid inserting extra "if" into hot path. - // This need to be additionally investigated and measured. + // The correctness of the data should be ensured by the caller. 
#[inline] fn into_sample( self, diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index c0ebf8c9d0..c7ee6e8368 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -144,12 +144,10 @@ pub struct PutSampleBuilder(SampleBuilder); impl From for PutSampleBuilder { fn from(sample_builder: SampleBuilder) -> Self { - Self(SampleBuilder { - 0: Sample { - kind: SampleKind::Put, - ..sample_builder.0 - }, - }) + Self(SampleBuilder(Sample { + kind: SampleKind::Put, + ..sample_builder.0 + })) } } @@ -237,12 +235,10 @@ pub struct DeleteSampleBuilder(SampleBuilder); impl From for DeleteSampleBuilder { fn from(sample_builder: SampleBuilder) -> Self { - Self(SampleBuilder { - 0: Sample { - kind: SampleKind::Delete, - ..sample_builder.0 - }, - }) + Self(SampleBuilder(Sample { + kind: SampleKind::Delete, + ..sample_builder.0 + })) } } From 3386237bea3e10f80ddb5089617f723577cef5b5 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 24 Mar 2024 19:58:46 +0100 Subject: [PATCH 041/357] clippy fix --- plugins/zenoh-plugin-storage-manager/src/replica/storage.rs | 2 +- zenoh/tests/attachments.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index fbc734d716..f2fb0386c3 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -443,7 +443,7 @@ impl StorageService { match self.ovderriding_wild_update(&k, &sample.timestamp).await { Some(overriding_update) => overriding_update.into_sample(k.clone()), - None => sample.clone().into(), + None => sample.clone(), }; let stripped_key = match self.strip_prefix(&sample_to_store.key_expr) { diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 38d03b0a84..8d26cc0344 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -51,7 +51,7 @@ fn pubsub() { #[cfg(feature = "unstable")] #[test] fn queries() { - use zenoh::{prelude::sync::*, sample::Attachment}; + use zenoh::{prelude::sync::*, sample::Attachment, sample_builder::SampleBuilderTrait}; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh From f52140aec5909389323cdad70d84b9fc4ba71395 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 24 Mar 2024 20:13:49 +0100 Subject: [PATCH 042/357] zenoh-ext links zenoh with unstable --- zenoh-ext/src/publication_cache.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 78fff32014..a4eff1e932 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -121,10 +121,9 @@ async fn reply_sample(query: &Query, sample: &Sample) { let reply = query .reply_sample(sample.key_expr().clone().into_owned()) .with_timestamp_opt(sample.timestamp().cloned()); - #[cfg(feature = "unstable")] let reply = reply - .with_attachment_opt(sample.attachment()) - .with_source_info(sample.source_info()); + .with_attachment_opt(sample.attachment().cloned()) + .with_source_info(sample.source_info().clone()); if let Err(e) = match sample.kind() { SampleKind::Put => reply.put(sample.payload().clone()).res_async().await, SampleKind::Delete => reply.delete().res_async().await, From a629c765fb86823d3f4fa57d979936c49915221a Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 24 Mar 2024 22:06:24 +0100 Subject: [PATCH 043/357] samplefields used --- 
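Note on the pattern applied below in both call sites: a minimal sketch, assuming the
`SampleFields` and `reply_sample()` builder APIs introduced earlier in this series, the
`unstable` feature for the `attachment`/`source_info` fields, and a hypothetical helper
name `reply_with_sample` for illustration only.

    // Imports as added by this patch:
    //   use zenoh::prelude::r#async::*;
    //   use zenoh::sample::SampleFields;
    //   use zenoh::sample_builder::SampleBuilderTrait;
    async fn reply_with_sample(query: &Query, sample: Sample) {
        // Decompose the sample into its public fields...
        let SampleFields {
            key_expr,
            timestamp,
            attachment,
            source_info,
            payload,
            kind,
            ..
        } = sample.into();
        // ...rebuild a reply carrying the same metadata, and only choose
        // between put/delete at the final step.
        let reply = query
            .reply_sample(key_expr)
            .with_timestamp_opt(timestamp)
            .with_attachment_opt(attachment)
            .with_source_info(source_info);
        if let Err(e) = match kind {
            SampleKind::Put => reply.put(payload).res_async().await,
            SampleKind::Delete => reply.delete().res_async().await,
        } {
            log::warn!("Error replying to query: {}", e);
        }
    }
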
plugins/zenoh-plugin-example/src/lib.rs | 16 ++++++++-------- zenoh-ext/src/publication_cache.rs | 23 ++++++++++++++++------- 2 files changed, 24 insertions(+), 15 deletions(-) diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 9b9dda40de..40f8d69488 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -24,6 +24,7 @@ use std::sync::{ use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::runtime::Runtime; +use zenoh::sample::SampleFields; use zenoh::sample_builder::SampleBuilderTrait; use zenoh_core::zlock; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; @@ -175,15 +176,14 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { info!("Handling query '{}'", query.selector()); for (key_expr, sample) in stored.iter() { if query.selector().key_expr.intersects(unsafe{keyexpr::from_str_unchecked(key_expr)}) { + let SampleFields { key_expr, timestamp, attachment, source_info, payload, kind, .. } = sample.clone().into(); let reply = query - .reply_sample(sample.key_expr().clone().into_owned()) - .with_timestamp_opt(sample.timestamp().cloned()); - #[cfg(feature = "unstable")] - let reply = reply - .with_attachment_opt(sample.attachment()) - .with_source_info(sample.source_info()); - match sample.kind() { - SampleKind::Put => reply.put(sample.payload().clone()).res().await.unwrap(), + .reply_sample(key_expr) + .with_timestamp_opt(timestamp) + .with_attachment_opt(attachment) + .with_source_info(source_info); + match kind { + SampleKind::Put => reply.put(payload).res().await.unwrap(), SampleKind::Delete => reply.delete().res().await.unwrap(), } } diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index a4eff1e932..8a782a179e 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -20,6 +20,7 @@ use std::convert::TryInto; use std::future::Ready; use zenoh::prelude::r#async::*; use zenoh::queryable::{Query, Queryable}; +use zenoh::sample::SampleFields; use zenoh::sample_builder::SampleBuilderTrait; use zenoh::subscriber::FlumeSubscriber; use zenoh::SessionRef; @@ -118,14 +119,22 @@ pub struct PublicationCache<'a> { } async fn reply_sample(query: &Query, sample: &Sample) { + let SampleFields { + key_expr, + timestamp, + attachment, + source_info, + payload, + kind, + .. 
+ } = sample.clone().into(); let reply = query - .reply_sample(sample.key_expr().clone().into_owned()) - .with_timestamp_opt(sample.timestamp().cloned()); - let reply = reply - .with_attachment_opt(sample.attachment().cloned()) - .with_source_info(sample.source_info().clone()); - if let Err(e) = match sample.kind() { - SampleKind::Put => reply.put(sample.payload().clone()).res_async().await, + .reply_sample(key_expr) + .with_timestamp_opt(timestamp) + .with_attachment_opt(attachment) + .with_source_info(source_info); + if let Err(e) = match kind { + SampleKind::Put => reply.put(payload).res_async().await, SampleKind::Delete => reply.delete().res_async().await, } { log::warn!("Error replying to query: {}", e); From 1945492ec9a27546e211e5bffac5bd5206cbdcd1 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 25 Mar 2024 10:41:14 +0100 Subject: [PATCH 044/357] restored old storage manager code --- .../src/replica/aligner.rs | 22 +- .../src/replica/storage.rs | 343 +++++++----------- 2 files changed, 146 insertions(+), 219 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index f00029442f..a899196e7e 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -12,10 +12,8 @@ // ZettaScale Zenoh Team, // -use super::storage::StorageSample; use super::{Digest, EraType, LogEntry, Snapshotter}; use super::{CONTENTS, ERA, INTERVALS, SUBINTERVALS}; -use crate::replica::storage::StorageSampleKind; use async_std::sync::{Arc, RwLock}; use flume::{Receiver, Sender}; use std::collections::{HashMap, HashSet}; @@ -23,15 +21,17 @@ use std::str; use zenoh::key_expr::{KeyExpr, OwnedKeyExpr}; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; +use zenoh::sample_builder::{PutSampleBuilder, PutSampleBuilderTrait, SampleBuilderTrait}; use zenoh::time::Timestamp; use zenoh::Session; +use zenoh_core::{AsyncResolve, SyncResolve}; pub struct Aligner { session: Arc, digest_key: OwnedKeyExpr, snapshotter: Arc, rx_digest: Receiver<(String, Digest)>, - tx_sample: Sender, + tx_sample: Sender, digests_processed: RwLock>, } @@ -40,7 +40,7 @@ impl Aligner { session: Arc, digest_key: OwnedKeyExpr, rx_digest: Receiver<(String, Digest)>, - tx_sample: Sender, + tx_sample: Sender, snapshotter: Arc, ) { let aligner = Aligner { @@ -107,11 +107,13 @@ impl Aligner { log::trace!("[ALIGNER] Received queried samples: {missing_data:?}"); for (key, (ts, value)) in missing_data { - let sample = StorageSample { - key_expr: key.into(), - timestamp: ts, - kind: StorageSampleKind::Put(value), - }; + let Value { + payload, encoding, .. 
+ } = value; + let sample = PutSampleBuilder::new(key, payload) + .with_encoding(encoding) + .with_timestamp(ts) + .res_sync(); log::debug!("[ALIGNER] Adding {:?} to storage", sample); self.tx_sample.send_async(sample).await.unwrap_or_else(|e| { log::error!("[ALIGNER] Error adding sample to storage: {}", e) @@ -329,7 +331,7 @@ impl Aligner { .get(&selector) .consolidation(zenoh::query::ConsolidationMode::None) .accept_replies(zenoh::query::ReplyKeyExpr::Any) - .res() + .res_async() .await { Ok(replies) => { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index f2fb0386c3..67ce871bb0 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -19,18 +19,19 @@ use async_trait::async_trait; use flume::{Receiver, Sender}; use futures::select; use std::collections::{HashMap, HashSet}; -use std::str::FromStr; +use std::str::{self, FromStr}; use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::ZBuf; use zenoh::prelude::r#async::*; use zenoh::query::ConsolidationMode; -use zenoh::sample::SampleFields; -use zenoh::sample_builder::{PutSampleBuilderTrait, SampleBuilder, SampleBuilderTrait}; +use zenoh::sample_builder::{ + PutSampleBuilder, PutSampleBuilderTrait, SampleBuilder, SampleBuilderTrait, +}; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::{Result as ZResult, Session}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; -use zenoh_core::SyncResolve; +use zenoh_core::{AsyncResolve, SyncResolve}; use zenoh_keyexpr::key_expr::OwnedKeyExpr; use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; use zenoh_keyexpr::keyexpr_tree::{support::NonWild, support::UnknownWildness, KeBoxTree}; @@ -41,152 +42,15 @@ use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer}; pub const WILDCARD_UPDATES_FILENAME: &str = "wildcard_updates"; pub const TOMBSTONE_FILENAME: &str = "tombstones"; -#[derive(Clone, Debug)] -pub enum StorageSampleKind { - Put(Value), - Delete, -} - -#[derive(Clone, Debug)] -pub struct StorageSample { - pub key_expr: KeyExpr<'static>, - pub timestamp: Timestamp, - pub kind: StorageSampleKind, -} - -impl From for StorageSample { - fn from(sample: Sample) -> Self { - let SampleFields { - key_expr, - timestamp, - kind, - payload, - encoding, - .. 
- } = sample.into(); - StorageSample { - key_expr, - timestamp: timestamp.unwrap_or(new_reception_timestamp()), - kind: match kind { - SampleKind::Put => { - StorageSampleKind::Put(Value::new(payload).with_encoding(encoding)) - } - SampleKind::Delete => StorageSampleKind::Delete, - }, - } - } -} - #[derive(Clone)] -enum Update { - Put(StoredData), - Delete(Timestamp), -} - -impl From for Update { - fn from(value: StorageSample) -> Self { - match value.kind { - StorageSampleKind::Put(data) => Update::Put(StoredData { - value: data, - timestamp: value.timestamp, - }), - StorageSampleKind::Delete => Update::Delete(value.timestamp), - } - } -} - -impl Update { - fn timestamp(&self) -> &Timestamp { - match self { - Update::Put(data) => &data.timestamp, - Update::Delete(ts) => ts, - } - } -} - -// implement from String for Update -impl TryFrom for Update { - type Error = zenoh::Error; - - fn try_from(value: String) -> Result { - let result: (String, String, String, Vec<&[u8]>) = serde_json::from_str(&value)?; - let mut payload = ZBuf::default(); - for slice in result.3 { - payload.push_zslice(slice.to_vec().into()); - } - let value = Value::new(payload).with_encoding(result.2); - let timestamp = Timestamp::from_str(&result.1).map_err(|_| "Error parsing timestamp")?; - if result.0.eq(&(SampleKind::Put).to_string()) { - Ok(Update::Put(StoredData { value, timestamp })) - } else { - Ok(Update::Delete(timestamp)) - } - } -} - -// implement to_string for Update -impl ToString for Update { - fn to_string(&self) -> String { - let result = match self { - Update::Put(data) => ( - SampleKind::Put.to_string(), - data.timestamp.to_string(), - data.value.encoding.to_string(), - data.value.payload.slices().collect::>(), - ), - Update::Delete(ts) => ( - SampleKind::Delete.to_string(), - ts.to_string(), - "".to_string(), - vec![], - ), - }; - serde_json::to_string_pretty(&result).unwrap() - } -} - -trait IntoStorageSample { - fn into_sample(self, key_expr: IntoKeyExpr) -> StorageSample - where - IntoKeyExpr: Into>; -} - -impl IntoStorageSample for StoredData { - fn into_sample(self, key_expr: IntoKeyExpr) -> StorageSample - where - IntoKeyExpr: Into>, - { - StorageSample { - key_expr: key_expr.into(), - timestamp: self.timestamp, - kind: StorageSampleKind::Put(self.value), - } - } -} - -impl IntoStorageSample for Update { - fn into_sample(self, key_expr: IntoKeyExpr) -> StorageSample - where - IntoKeyExpr: Into>, - { - match self { - Update::Put(data) => StorageSample { - key_expr: key_expr.into(), - timestamp: data.timestamp, - kind: StorageSampleKind::Put(data.value), - }, - Update::Delete(ts) => StorageSample { - key_expr: key_expr.into(), - timestamp: ts, - kind: StorageSampleKind::Delete, - }, - } - } +struct Update { + kind: SampleKind, + data: StoredData, } pub struct ReplicationService { pub empty_start: bool, - pub aligner_updates: Receiver, + pub aligner_updates: Receiver, pub log_propagation: Sender<(OwnedKeyExpr, Timestamp)>, } @@ -245,11 +109,10 @@ impl StorageService { let saved_wc = std::fs::read_to_string(zenoh_home().join(WILDCARD_UPDATES_FILENAME)).unwrap(); let saved_wc: HashMap = - serde_json::from_str(&saved_wc).unwrap(); // TODO: Remove unwrap + serde_json::from_str(&saved_wc).unwrap(); let mut wildcard_updates = storage_service.wildcard_updates.write().await; for (k, data) in saved_wc { - wildcard_updates.insert(&k, Update::try_from(data).unwrap()); - // TODO: Remove unwrap + wildcard_updates.insert(&k, construct_update(data)); } } } @@ -325,7 +188,7 @@ impl StorageService { 
log::error!("Sample {:?} is not timestamped. Please timestamp samples meant for replicated storage.", sample); } else { - self.process_sample(sample.into()).await; + self.process_sample(sample).await; } }, // on query on key_expr @@ -377,7 +240,7 @@ impl StorageService { } else { sample }; - self.process_sample(sample.into()).await; + self.process_sample(sample).await; }, // on query on key_expr query = storage_queryable.recv_async() => { @@ -407,46 +270,61 @@ impl StorageService { // The storage should only simply save the key, sample pair while put and retrieve the same during get // the trimming during PUT and GET should be handled by the plugin - async fn process_sample(&self, sample: StorageSample) { + async fn process_sample(&self, sample: Sample) { log::trace!("[STORAGE] Processing sample: {:?}", sample); - // if wildcard, update wildcard_updates - if sample.key_expr.is_wild() { + if sample.key_expr().is_wild() { self.register_wildcard_update(sample.clone()).await; } - let matching_keys = if sample.key_expr.is_wild() { - self.get_matching_keys(&sample.key_expr).await + let matching_keys = if sample.key_expr().is_wild() { + self.get_matching_keys(sample.key_expr()).await } else { - vec![sample.key_expr.clone().into()] + vec![sample.key_expr().clone().into()] }; log::trace!( "The list of keys matching `{}` is : {:?}", - sample.key_expr, + sample.key_expr(), matching_keys ); for k in matching_keys { - if !self.is_deleted(&k.clone(), &sample.timestamp).await + if !self + .is_deleted(&k.clone(), sample.timestamp().unwrap()) + .await && (self.capability.history.eq(&History::All) || (self.capability.history.eq(&History::Latest) - && self.is_latest(&k, &sample.timestamp).await)) + && self.is_latest(&k, sample.timestamp().unwrap()).await)) { log::trace!( "Sample `{:?}` identified as neded processing for key {}", sample, - &k + k ); // there might be the case that the actual update was outdated due to a wild card update, but not stored yet in the storage. // get the relevant wild card entry and use that value and timestamp to update the storage - let sample_to_store = - match self.ovderriding_wild_update(&k, &sample.timestamp).await { - Some(overriding_update) => overriding_update.into_sample(k.clone()), - - None => sample.clone(), - }; + let sample_to_store = match self + .ovderriding_wild_update(&k, sample.timestamp().unwrap()) + .await + { + Some(overriding_update) => { + let Value { + payload, encoding, .. 
+ } = overriding_update.data.value; + PutSampleBuilder::new(KeyExpr::from(k.clone()), payload) + .with_encoding(encoding) + .with_timestamp(overriding_update.data.timestamp) + .res_sync() + } + None => { + PutSampleBuilder::new(KeyExpr::from(k.clone()), sample.payload().clone()) + .with_encoding(sample.encoding().clone()) + .with_timestamp(*sample.timestamp().unwrap()) + .res_sync() + } + }; - let stripped_key = match self.strip_prefix(&sample_to_store.key_expr) { + let stripped_key = match self.strip_prefix(sample_to_store.key_expr()) { Ok(stripped) => stripped, Err(e) => { log::error!("{}", e); @@ -454,17 +332,23 @@ impl StorageService { } }; let mut storage = self.storage.lock().await; - let result = match sample_to_store.kind { - StorageSampleKind::Put(data) => { + let result = match sample.kind() { + SampleKind::Put => { storage - .put(stripped_key, data, sample_to_store.timestamp) + .put( + stripped_key, + Value::new(sample_to_store.payload().clone()) + .with_encoding(sample_to_store.encoding().clone()), + *sample_to_store.timestamp().unwrap(), + ) .await } - StorageSampleKind::Delete => { + SampleKind::Delete => { // register a tombstone - self.mark_tombstone(&k, sample_to_store.timestamp).await; + self.mark_tombstone(&k, *sample_to_store.timestamp().unwrap()) + .await; storage - .delete(stripped_key, sample_to_store.timestamp) + .delete(stripped_key, *sample_to_store.timestamp().unwrap()) .await } }; @@ -478,7 +362,7 @@ impl StorageService { .as_ref() .unwrap() .log_propagation - .send((k.clone(), sample_to_store.timestamp)); + .send((k.clone(), *sample_to_store.timestamp().unwrap())); match sending { Ok(_) => (), Err(e) => { @@ -509,16 +393,26 @@ impl StorageService { } } - async fn register_wildcard_update(&self, sample: StorageSample) { + async fn register_wildcard_update(&self, sample: Sample) { // @TODO: change into a better store that does incremental writes - let key = sample.key_expr.clone(); + let key = sample.key_expr().clone(); let mut wildcards = self.wildcard_updates.write().await; - wildcards.insert(&key, sample.into()); + let timestamp = *sample.timestamp().unwrap(); + wildcards.insert( + &key, + Update { + kind: sample.kind(), + data: StoredData { + value: Value::from(sample), + timestamp, + }, + }, + ); if self.capability.persistence.eq(&Persistence::Durable) { // flush to disk to makeit durable let mut serialized_data = HashMap::new(); for (k, update) in wildcards.key_value_pairs() { - serialized_data.insert(k, update.to_string()); + serialized_data.insert(k, serialize_update(update)); } if let Err(e) = std::fs::write( zenoh_home().join(WILDCARD_UPDATES_FILENAME), @@ -547,36 +441,34 @@ impl StorageService { let mut update = None; for node in wildcards.intersecting_keys(key_expr) { let weight = wildcards.weight_at(&node); - if let Some(weight) = weight { - if weight.timestamp() > ts { - // if the key matches a wild card update, check whether it was saved in storage - // remember that wild card updates change only existing keys - let stripped_key = match self.strip_prefix(&key_expr.into()) { - Ok(stripped) => stripped, - Err(e) => { - log::error!("{}", e); - break; - } - }; - let mut storage = self.storage.lock().await; - match storage.get(stripped_key, "").await { - Ok(stored_data) => { - for entry in stored_data { - if entry.timestamp > *ts { - return None; - } + if weight.is_some() && weight.unwrap().data.timestamp > *ts { + // if the key matches a wild card update, check whether it was saved in storage + // remember that wild card updates change only existing 
keys + let stripped_key = match self.strip_prefix(&key_expr.into()) { + Ok(stripped) => stripped, + Err(e) => { + log::error!("{}", e); + break; + } + }; + let mut storage = self.storage.lock().await; + match storage.get(stripped_key, "").await { + Ok(stored_data) => { + for entry in stored_data { + if entry.timestamp > *ts { + return None; } } - Err(e) => { - log::warn!( - "Storage '{}' raised an error fetching a query on key {} : {}", - self.name, - key_expr, - e - ); - ts = weight.timestamp(); - update = Some(weight.clone()); - } + } + Err(e) => { + log::warn!( + "Storage '{}' raised an error fetching a query on key {} : {}", + self.name, + key_expr, + e + ); + ts = &weight.unwrap().data.timestamp; + update = Some(weight.unwrap().clone()); } } } @@ -629,8 +521,12 @@ impl StorageService { match storage.get(stripped_key, q.parameters()).await { Ok(stored_data) => { for entry in stored_data { + let Value { + payload, encoding, .. + } = entry.value; if let Err(e) = q - .reply(key.clone(), entry.value.payload) + .reply(key.clone(), payload) + .with_encoding(encoding) .with_timestamp(entry.timestamp) .res_async() .await @@ -776,7 +672,7 @@ impl StorageService { while let Ok(reply) = replies.recv_async().await { match reply.sample { Ok(sample) => { - self.process_sample(sample.into()).await; + self.process_sample(sample).await; } Err(e) => log::warn!( "Storage '{}' received an error to align query: {:?}", @@ -789,6 +685,35 @@ impl StorageService { } } +fn serialize_update(update: &Update) -> String { + let result = ( + update.kind.to_string(), + update.data.timestamp.to_string(), + update.data.value.encoding.to_string(), + update.data.value.payload.slices().collect::>(), + ); + serde_json::to_string_pretty(&result).unwrap() +} + +fn construct_update(data: String) -> Update { + let result: (String, String, String, Vec<&[u8]>) = serde_json::from_str(&data).unwrap(); // @TODO: remove the unwrap() + let mut payload = ZBuf::default(); + for slice in result.3 { + payload.push_zslice(slice.to_vec().into()); + } + let value = Value::new(payload).with_encoding(result.2); + let data = StoredData { + value, + timestamp: Timestamp::from_str(&result.1).unwrap(), // @TODO: remove the unwrap() + }; + let kind = if result.0.eq(&(SampleKind::Put).to_string()) { + SampleKind::Put + } else { + SampleKind::Delete + }; + Update { kind, data } +} + // Periodic event cleaning-up data info for old metadata struct GarbageCollectionEvent { config: GarbageCollectionConfig, @@ -820,7 +745,7 @@ impl Timed for GarbageCollectionEvent { let mut to_be_removed = HashSet::new(); for (k, update) in wildcard_updates.key_value_pairs() { - let ts = update.timestamp(); + let ts = update.data.timestamp; if ts.get_time() < &time_limit { // mark key to be removed to_be_removed.insert(k); From 65a4d7f8646b159641df015a9a47608d5bae26af Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 25 Mar 2024 10:54:56 +0100 Subject: [PATCH 045/357] interceptors removed from plugin storage API (#859) --- plugins/zenoh-backend-example/src/lib.rs | 13 +--- plugins/zenoh-backend-traits/src/lib.rs | 68 +------------------ .../src/backends_mgt.rs | 7 -- .../zenoh-plugin-storage-manager/src/lib.rs | 4 -- .../src/memory_backend/mod.rs | 20 ------ .../src/replica/storage.rs | 23 ------- 6 files changed, 3 insertions(+), 132 deletions(-) diff --git a/plugins/zenoh-backend-example/src/lib.rs b/plugins/zenoh-backend-example/src/lib.rs index 602d29f375..f81231a498 100644 --- a/plugins/zenoh-backend-example/src/lib.rs +++ 
b/plugins/zenoh-backend-example/src/lib.rs @@ -13,11 +13,8 @@ // use async_std::sync::RwLock; use async_trait::async_trait; -use std::{ - collections::{hash_map::Entry, HashMap}, - sync::Arc, -}; -use zenoh::{prelude::OwnedKeyExpr, sample::Sample, time::Timestamp, value::Value}; +use std::collections::{hash_map::Entry, HashMap}; +use zenoh::{prelude::OwnedKeyExpr, time::Timestamp, value::Value}; use zenoh_backend_traits::{ config::{StorageConfig, VolumeConfig}, Capability, History, Persistence, Storage, StorageInsertionResult, StoredData, Volume, @@ -71,12 +68,6 @@ impl Volume for ExampleBackend { async fn create_storage(&self, _props: StorageConfig) -> ZResult> { Ok(Box::::default()) } - fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>> { - None - } - fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>> { - None - } } #[async_trait] diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index d17e6dfd77..40d022f1ec 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -68,16 +68,6 @@ //! // The properties are the ones passed via a PUT in the admin space for Storage creation. //! Ok(Box::new(MyStorage::new(properties).await?)) //! } -//! -//! fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>> { -//! // No interception point for incoming data (on PUT operations) -//! None -//! } -//! -//! fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>> { -//! // No interception point for outgoing data (on GET operations) -//! None -//! } //! } //! //! // Your Storage implementation @@ -135,9 +125,7 @@ use async_trait::async_trait; use const_format::concatcp; -use std::sync::Arc; -use zenoh::prelude::{KeyExpr, OwnedKeyExpr, Sample, Selector}; -use zenoh::queryable::ReplyBuilder; +use zenoh::prelude::OwnedKeyExpr; use zenoh::time::Timestamp; use zenoh::value::Value; pub use zenoh::Result as ZResult; @@ -210,14 +198,6 @@ pub trait Volume: Send + Sync { /// Creates a storage configured with some properties. async fn create_storage(&self, props: StorageConfig) -> ZResult>; - - /// Returns an interceptor that will be called before pushing any data - /// into a storage created by this backend. `None` can be returned for no interception point. - fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>>; - - /// Returns an interceptor that will be called before sending any reply - /// to a query from a storage created by this backend. `None` can be returned for no interception point. - fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>>; } pub type VolumeInstance = Box; @@ -282,49 +262,3 @@ pub trait Storage: Send + Sync { /// Remember to fetch the entry corresponding to the `None` key async fn get_all_entries(&self) -> ZResult, Timestamp)>>; } - -/// A wrapper around the [`zenoh::queryable::Query`] allowing to call the -/// OutgoingDataInterceptor (if any) before to send the reply -pub struct Query { - q: zenoh::queryable::Query, - interceptor: Option Sample + Send + Sync>>, -} - -impl Query { - pub fn new( - q: zenoh::queryable::Query, - interceptor: Option Sample + Send + Sync>>, - ) -> Query { - Query { q, interceptor } - } - - /// The full [`Selector`] of this Query. - #[inline(always)] - pub fn selector(&self) -> Selector<'_> { - self.q.selector() - } - - /// The key selector part of this Query. 
- #[inline(always)] - pub fn key_expr(&self) -> &KeyExpr<'static> { - self.q.key_expr() - } - - /// This Query's selector parameters. - #[inline(always)] - pub fn parameters(&self) -> &str { - self.q.parameters() - } - - /// Sends a Sample as a reply to this Query - pub fn reply(&self, sample: Sample) -> ReplyBuilder<'_> { - // Call outgoing intercerceptor - let sample = if let Some(ref interceptor) = self.interceptor { - interceptor(sample) - } else { - sample - }; - // Send reply - self.q.reply_sample(sample) - } -} diff --git a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs index aa7260e868..90a6ae6250 100644 --- a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs @@ -14,7 +14,6 @@ use super::storages_mgt::*; use flume::Sender; use std::sync::Arc; -use zenoh::prelude::r#async::*; use zenoh::Session; use zenoh_backend_traits::config::StorageConfig; use zenoh_backend_traits::{Capability, VolumeInstance}; @@ -23,16 +22,12 @@ use zenoh_result::ZResult; pub struct StoreIntercept { pub storage: Box, pub capability: Capability, - pub in_interceptor: Option Sample + Send + Sync>>, - pub out_interceptor: Option Sample + Send + Sync>>, } pub(crate) async fn create_and_start_storage( admin_key: String, config: StorageConfig, backend: &VolumeInstance, - in_interceptor: Option Sample + Send + Sync>>, - out_interceptor: Option Sample + Send + Sync>>, zenoh: Arc, ) -> ZResult> { log::trace!("Create storage '{}'", &admin_key); @@ -41,8 +36,6 @@ pub(crate) async fn create_and_start_storage( let store_intercept = StoreIntercept { storage, capability, - in_interceptor, - out_interceptor, }; start_storage(store_intercept, config, admin_key, zenoh).await diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 0db30bbd6a..91df2f108d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -239,14 +239,10 @@ impl StorageRuntimeInner { volume_id, backend.name() ); - let in_interceptor = backend.instance().incoming_data_interceptor(); - let out_interceptor = backend.instance().outgoing_data_interceptor(); let stopper = async_std::task::block_on(create_and_start_storage( admin_key, storage.clone(), backend.instance(), - in_interceptor, - out_interceptor, self.session.clone(), ))?; self.storages diff --git a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs index ebb4922c9d..4e333b8592 100644 --- a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs @@ -61,26 +61,6 @@ impl Volume for MemoryBackend { log::debug!("Create Memory Storage with configuration: {:?}", properties); Ok(Box::new(MemoryStorage::new(properties).await?)) } - - fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>> { - // By default: no interception point - None - // To test interceptors, uncomment this line: - // Some(Arc::new(|sample| { - // trace!(">>>> IN INTERCEPTOR FOR {:?}", sample); - // sample - // })) - } - - fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>> { - // By default: no interception point - None - // To test interceptors, uncomment this line: - // Some(Arc::new(|sample| { - // trace!("<<<< OUT INTERCEPTOR FOR {:?}", sample); - // sample - // })) - } } impl Drop for MemoryBackend { 
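// [Editorial sketch, not part of the patch] With the backend-traits `Query`
// wrapper and the per-volume interceptors removed above, a storage replies
// through zenoh's own `zenoh::queryable::Query` directly, as the storage.rs
// hunks below do with `reply_sample`. A minimal helper illustrating the
// unwrapped reply path; the function name and the pre-built `sample` are
// illustrative assumptions, while the API calls are the ones used by this patch:
use zenoh::prelude::r#async::*;
use zenoh::queryable::Query;

async fn reply_unintercepted(q: &Query<'_>, sample: Sample) -> zenoh::Result<()> {
    // No outgoing interceptor can rewrite the sample any more; it is sent as built.
    q.reply_sample(sample).res().await
}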
diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 0708dcabd9..35134dfe43 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -60,8 +60,6 @@ pub struct StorageService { capability: Capability, tombstones: Arc>>, wildcard_updates: Arc>>, - in_interceptor: Option Sample + Send + Sync>>, - out_interceptor: Option Sample + Send + Sync>>, replication: Option, } @@ -85,8 +83,6 @@ impl StorageService { capability: store_intercept.capability, tombstones: Arc::new(RwLock::new(KeBoxTree::default())), wildcard_updates: Arc::new(RwLock::new(KeBoxTree::default())), - in_interceptor: store_intercept.in_interceptor, - out_interceptor: store_intercept.out_interceptor, replication, }; if storage_service @@ -263,13 +259,6 @@ impl StorageService { // the trimming during PUT and GET should be handled by the plugin async fn process_sample(&self, sample: Sample) { log::trace!("[STORAGE] Processing sample: {:?}", sample); - // Call incoming data interceptor (if any) - let sample = if let Some(ref interceptor) = self.in_interceptor { - interceptor(sample) - } else { - sample - }; - // if wildcard, update wildcard_updates if sample.key_expr().is_wild() { self.register_wildcard_update(sample.clone()).await; @@ -523,12 +512,6 @@ impl StorageService { let sample = Sample::new(key.clone(), payload) .with_encoding(encoding) .with_timestamp(entry.timestamp); - // apply outgoing interceptor on results - let sample = if let Some(ref interceptor) = self.out_interceptor { - interceptor(sample) - } else { - sample - }; if let Err(e) = q.reply_sample(sample).res().await { log::warn!( "Storage '{}' raised an error replying a query: {}", @@ -561,12 +544,6 @@ impl StorageService { let sample = Sample::new(q.key_expr().clone(), payload) .with_encoding(encoding) .with_timestamp(entry.timestamp); - // apply outgoing interceptor on results - let sample = if let Some(ref interceptor) = self.out_interceptor { - interceptor(sample) - } else { - sample - }; if let Err(e) = q.reply_sample(sample).res().await { log::warn!( "Storage '{}' raised an error replying a query: {}", From 48d8d776986ba31d0030b23250be00da93043b64 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 25 Mar 2024 11:49:52 +0100 Subject: [PATCH 046/357] separate qosbuilder trait --- zenoh/src/queryable.rs | 8 +++++++- zenoh/src/sample_builder.rs | 18 +++++++++++++++--- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index f2e00e47c6..68b27526ce 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -21,7 +21,7 @@ use crate::prelude::*; use crate::sample::SourceInfo; use crate::sample_builder::{ DeleteSampleBuilder, DeleteSampleBuilderTrait, PutSampleBuilder, PutSampleBuilderTrait, - SampleBuilder, SampleBuilderTrait, + QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, }; use crate::Id; use crate::SessionRef; @@ -287,7 +287,9 @@ impl SampleBuilderTrait for ReplySampleBuilder<'_> { ..self } } +} +impl QoSBuilderTrait for ReplySampleBuilder<'_> { fn congestion_control(self, congestion_control: CongestionControl) -> Self { Self { sample_builder: self.sample_builder.congestion_control(congestion_control), @@ -366,7 +368,9 @@ impl SampleBuilderTrait for ReplyBuilder<'_> { ..self } } +} +impl QoSBuilderTrait for ReplyBuilder<'_> { fn congestion_control(self, congestion_control: CongestionControl) -> Self { Self { 
sample_builder: self.sample_builder.congestion_control(congestion_control), @@ -464,7 +468,9 @@ impl SampleBuilderTrait for ReplyDelBuilder<'_> { ..self } } +} +impl QoSBuilderTrait for ReplyDelBuilder<'_> { fn congestion_control(self, congestion_control: CongestionControl) -> Self { Self { sample_builder: self.sample_builder.congestion_control(congestion_control), diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index c7ee6e8368..7545646b91 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -28,6 +28,12 @@ use zenoh_core::Resolvable; use zenoh_core::SyncResolve; use zenoh_protocol::core::CongestionControl; +pub trait QoSBuilderTrait { + fn congestion_control(self, congestion_control: CongestionControl) -> Self; + fn priority(self, priority: Priority) -> Self; + fn express(self, is_express: bool) -> Self; +} + pub trait SampleBuilderTrait { fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self where @@ -40,9 +46,6 @@ pub trait SampleBuilderTrait { fn with_attachment_opt(self, attachment: Option) -> Self; #[zenoh_macros::unstable] fn with_attachment(self, attachment: Attachment) -> Self; - fn congestion_control(self, congestion_control: CongestionControl) -> Self; - fn priority(self, priority: Priority) -> Self; - fn express(self, is_express: bool) -> Self; } pub trait PutSampleBuilderTrait: SampleBuilderTrait { @@ -119,6 +122,9 @@ impl SampleBuilderTrait for SampleBuilder { fn with_attachment(self, attachment: Attachment) -> Self { self.with_attachment_opt(Some(attachment)) } +} + +impl QoSBuilderTrait for SampleBuilder { fn congestion_control(self, congestion_control: CongestionControl) -> Self { Self(Sample { qos: self.0.qos.with_congestion_control(congestion_control), @@ -201,6 +207,9 @@ impl SampleBuilderTrait for PutSampleBuilder { fn with_attachment_opt(self, attachment: Option) -> Self { Self(self.0.with_attachment_opt(attachment)) } +} + +impl QoSBuilderTrait for PutSampleBuilder { fn congestion_control(self, congestion_control: CongestionControl) -> Self { Self(self.0.congestion_control(congestion_control)) } @@ -291,6 +300,9 @@ impl SampleBuilderTrait for DeleteSampleBuilder { fn with_attachment_opt(self, attachment: Option) -> Self { Self(self.0.with_attachment_opt(attachment)) } +} + +impl QoSBuilderTrait for DeleteSampleBuilder { fn congestion_control(self, congestion_control: CongestionControl) -> Self { Self(self.0.congestion_control(congestion_control)) } From 322a4e06b0acdc5dc25be1b6e4abcd0a5c04bf82 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 25 Mar 2024 15:56:17 +0100 Subject: [PATCH 047/357] removed `with_keyexpr` from trait --- zenoh/src/queryable.rs | 30 -------------------------- zenoh/src/sample_builder.rs | 42 ++++++++++++++++++++----------------- 2 files changed, 23 insertions(+), 49 deletions(-) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 68b27526ce..9edb9fb26c 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -240,16 +240,6 @@ impl<'a> ReplySampleBuilder<'a> { } impl SampleBuilderTrait for ReplySampleBuilder<'_> { - fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self - where - IntoKeyExpr: Into>, - { - Self { - sample_builder: self.sample_builder.with_keyexpr(key_expr), - ..self - } - } - fn with_timestamp_opt(self, timestamp: Option) -> Self { Self { sample_builder: self.sample_builder.with_timestamp_opt(timestamp), @@ -321,16 +311,6 @@ pub struct ReplyBuilder<'a> { } impl SampleBuilderTrait for ReplyBuilder<'_> { - fn with_keyexpr(self, key_expr: IntoKeyExpr) 
-> Self - where - IntoKeyExpr: Into>, - { - Self { - sample_builder: self.sample_builder.with_keyexpr(key_expr), - ..self - } - } - fn with_timestamp_opt(self, timestamp: Option) -> Self { Self { sample_builder: self.sample_builder.with_timestamp_opt(timestamp), @@ -421,16 +401,6 @@ pub struct ReplyDelBuilder<'a> { } impl SampleBuilderTrait for ReplyDelBuilder<'_> { - fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self - where - IntoKeyExpr: Into>, - { - Self { - sample_builder: self.sample_builder.with_keyexpr(key_expr), - ..self - } - } - fn with_timestamp_opt(self, timestamp: Option) -> Self { Self { sample_builder: self.sample_builder.with_timestamp_opt(timestamp), diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index 7545646b91..0df98773fc 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -29,15 +29,17 @@ use zenoh_core::SyncResolve; use zenoh_protocol::core::CongestionControl; pub trait QoSBuilderTrait { + /// Change the `congestion_control` to apply when routing the data. fn congestion_control(self, congestion_control: CongestionControl) -> Self; + /// Change the priority of the written data. fn priority(self, priority: Priority) -> Self; + /// Change the `express` policy to apply when routing the data. + /// When express is set to `true`, then the message will not be batched. + /// This usually has a positive impact on latency but negative impact on throughput. fn express(self, is_express: bool) -> Self; } pub trait SampleBuilderTrait { - fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self - where - IntoKeyExpr: Into>; fn with_timestamp_opt(self, timestamp: Option) -> Self; fn with_timestamp(self, timestamp: Timestamp) -> Self; #[zenoh_macros::unstable] @@ -78,10 +80,8 @@ impl SampleBuilder { attachment: None, }) } -} - -impl SampleBuilderTrait for SampleBuilder { - fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self + /// Allows to change keyexpr of [`Sample`] + pub fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self where IntoKeyExpr: Into>, { @@ -90,7 +90,9 @@ impl SampleBuilderTrait for SampleBuilder { ..self.0 }) } +} +impl SampleBuilderTrait for SampleBuilder { fn with_timestamp_opt(self, timestamp: Option) -> Self { Self(Sample { timestamp, @@ -176,6 +178,13 @@ impl PutSampleBuilder { attachment: None, })) } + /// Allows to change keyexpr of [`Sample`] + pub fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self + where + IntoKeyExpr: Into>, + { + Self(self.0.with_keyexpr(key_expr)) + } // It's convenient to set QoS as a whole for internal usage. For user API there are `congestion_control`, `priority` and `express` methods. pub(crate) fn with_qos(self, qos: QoS) -> Self { Self(SampleBuilder(Sample { qos, ..self.0 .0 })) @@ -183,12 +192,6 @@ impl PutSampleBuilder { } impl SampleBuilderTrait for PutSampleBuilder { - fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self - where - IntoKeyExpr: Into>, - { - Self(self.0.with_keyexpr(key_expr)) - } fn with_timestamp(self, timestamp: Timestamp) -> Self { Self(self.0.with_timestamp(timestamp)) } @@ -269,6 +272,13 @@ impl DeleteSampleBuilder { attachment: None, })) } + /// Allows to change keyexpr of [`Sample`] + pub fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self + where + IntoKeyExpr: Into>, + { + Self(self.0.with_keyexpr(key_expr)) + } // It's convenient to set QoS as a whole for internal usage. For user API there are `congestion_control`, `priority` and `express` methods. 
pub(crate) fn with_qos(self, qos: QoS) -> Self { Self(SampleBuilder(Sample { qos, ..self.0 .0 })) @@ -276,12 +286,6 @@ impl DeleteSampleBuilder { } impl SampleBuilderTrait for DeleteSampleBuilder { - fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self - where - IntoKeyExpr: Into>, - { - Self(self.0.with_keyexpr(key_expr)) - } fn with_timestamp(self, timestamp: Timestamp) -> Self { Self(self.0.with_timestamp(timestamp)) } From 9515c7d63bec0744d9a1bf2e86b7242ee9121480 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 25 Mar 2024 17:36:36 +0100 Subject: [PATCH 048/357] put, delete builder --- .../zenoh-plugin-rest/examples/z_serve_sse.rs | 1 + plugins/zenoh-plugin-rest/src/lib.rs | 1 + zenoh-ext/src/group.rs | 1 + zenoh/src/publication.rs | 284 +++++++++++++----- zenoh/src/sample_builder.rs | 6 + zenoh/src/session.rs | 12 +- zenoh/tests/qos.rs | 1 + zenoh/tests/routing.rs | 1 + zenoh/tests/session.rs | 1 + zenoh/tests/unicity.rs | 1 + 10 files changed, 233 insertions(+), 76 deletions(-) diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index bb76005d6e..48f152e488 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -15,6 +15,7 @@ use clap::{arg, Command}; use std::time::Duration; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; +use zenoh::sample_builder::QoSBuilderTrait; use zenoh::{config::Config, key_expr::keyexpr}; const HTML: &str = r#" diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index e2718f6579..cc97590636 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -34,6 +34,7 @@ use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; +use zenoh::sample_builder::PutSampleBuilderTrait; use zenoh::selector::TIME_RANGE_KEY; use zenoh::Session; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 41007d8b87..973baf271b 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -28,6 +28,7 @@ use std::time::{Duration, Instant}; use zenoh::prelude::r#async::*; use zenoh::publication::Publisher; use zenoh::query::ConsolidationMode; +use zenoh::sample_builder::QoSBuilderTrait; use zenoh::Error as ZError; use zenoh::Result as ZResult; use zenoh::Session; diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 392c0bf8c1..97f485f1e3 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -18,6 +18,9 @@ use crate::prelude::*; #[zenoh_macros::unstable] use crate::sample::Attachment; use crate::sample::{DataInfo, QoS, Sample, SampleKind}; +use crate::sample_builder::{ + DeleteSampleBuilderTrait, PutSampleBuilderTrait, QoSBuilderTrait, SampleBuilderTrait, +}; use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] @@ -56,7 +59,14 @@ pub use zenoh_protocol::core::CongestionControl; /// .unwrap(); /// # }) /// ``` -pub type DeleteBuilder<'a, 'b> = PutBuilder<'a, 'b>; +pub struct DeleteBuilder<'a, 'b> { + pub(crate) publisher: PublisherBuilder<'a, 'b>, + pub(crate) timestamp: Option, + #[cfg(feature = "unstable")] + pub(crate) source_info: Option, + #[cfg(feature = "unstable")] + pub(crate) attachment: Option, +} /// A builder for initializing a [`put`](crate::Session::put) operation. 
/// @@ -81,36 +91,141 @@ pub type DeleteBuilder<'a, 'b> = PutBuilder<'a, 'b>; pub struct PutBuilder<'a, 'b> { pub(crate) publisher: PublisherBuilder<'a, 'b>, pub(crate) payload: Payload, - pub(crate) kind: SampleKind, pub(crate) encoding: Encoding, + pub(crate) timestamp: Option, + #[cfg(feature = "unstable")] + pub(crate) source_info: Option, #[cfg(feature = "unstable")] pub(crate) attachment: Option, } -impl PutBuilder<'_, '_> { - /// Change the `congestion_control` to apply when routing the data. +impl QoSBuilderTrait for PutBuilder<'_, '_> { #[inline] - pub fn congestion_control(mut self, congestion_control: CongestionControl) -> Self { - self.publisher = self.publisher.congestion_control(congestion_control); - self + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + Self { + publisher: self.publisher.congestion_control(congestion_control), + ..self + } } - - /// Change the priority of the written data. #[inline] - pub fn priority(mut self, priority: Priority) -> Self { - self.publisher = self.publisher.priority(priority); - self + fn priority(self, priority: Priority) -> Self { + Self { + publisher: self.publisher.priority(priority), + ..self + } } + #[inline] + fn express(self, is_express: bool) -> Self { + Self { + publisher: self.publisher.express(is_express), + ..self + } + } +} - /// Change the `express` policy to apply when routing the data. - /// When express is set to `true`, then the message will not be batched. - /// This usually has a positive impact on latency but negative impact on throughput. +impl QoSBuilderTrait for DeleteBuilder<'_, '_> { #[inline] - pub fn express(mut self, is_express: bool) -> Self { - self.publisher = self.publisher.express(is_express); - self + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + Self { + publisher: self.publisher.congestion_control(congestion_control), + ..self + } + } + #[inline] + fn priority(self, priority: Priority) -> Self { + Self { + publisher: self.publisher.priority(priority), + ..self + } + } + #[inline] + fn express(self, is_express: bool) -> Self { + Self { + publisher: self.publisher.express(is_express), + ..self + } + } +} + +impl SampleBuilderTrait for PutBuilder<'_, '_> { + fn with_timestamp_opt(self, timestamp: Option) -> Self { + Self { timestamp, ..self } + } + fn with_timestamp(self, timestamp: uhlc::Timestamp) -> Self { + Self { + timestamp: Some(timestamp), + ..self + } + } + #[cfg(feature = "unstable")] + fn with_source_info(self, source_info: SourceInfo) -> Self { + Self { + source_info: Some(source_info), + ..self + } + } + #[cfg(feature = "unstable")] + fn with_attachment_opt(self, attachment: Option) -> Self { + Self { attachment, ..self } + } + #[cfg(feature = "unstable")] + fn with_attachment(self, attachment: Attachment) -> Self { + Self { + attachment: Some(attachment), + ..self + } + } +} + +impl SampleBuilderTrait for DeleteBuilder<'_, '_> { + fn with_timestamp_opt(self, timestamp: Option) -> Self { + Self { timestamp, ..self } + } + fn with_timestamp(self, timestamp: uhlc::Timestamp) -> Self { + Self { + timestamp: Some(timestamp), + ..self + } + } + #[cfg(feature = "unstable")] + fn with_source_info(self, source_info: SourceInfo) -> Self { + Self { + source_info: Some(source_info), + ..self + } + } + #[cfg(feature = "unstable")] + fn with_attachment_opt(self, attachment: Option) -> Self { + Self { attachment, ..self } + } + #[cfg(feature = "unstable")] + fn with_attachment(self, attachment: Attachment) -> Self { + Self { + attachment: 
Some(attachment), + ..self + } + } +} + +impl PutSampleBuilderTrait for PutBuilder<'_, '_> { + fn with_encoding(self, encoding: Encoding) -> Self { + Self { encoding, ..self } + } + + fn with_payload(self, payload: IntoPayload) -> Self + where + IntoPayload: Into, + { + Self { + payload: payload.into(), + ..self + } } +} +impl DeleteSampleBuilderTrait for DeleteBuilder<'_, '_> {} + +impl PutBuilder<'_, '_> { /// Restrict the matching subscribers that will receive the published data /// to the ones that have the given [`Locality`](crate::prelude::Locality). #[zenoh_macros::unstable] @@ -119,21 +234,15 @@ impl PutBuilder<'_, '_> { self.publisher = self.publisher.allowed_destination(destination); self } +} - /// Set the [`Encoding`] of the written data. - #[inline] - pub fn with_encoding(mut self, encoding: IntoEncoding) -> Self - where - IntoEncoding: Into, - { - self.encoding = encoding.into(); - self - } - +impl DeleteBuilder<'_, '_> { + /// Restrict the matching subscribers that will receive the published data + /// to the ones that have the given [`Locality`](crate::prelude::Locality). #[zenoh_macros::unstable] - /// Attach user-provided data to the written data. - pub fn with_attachment(mut self, attachment: Attachment) -> Self { - self.attachment = Some(attachment); + #[inline] + pub fn allowed_destination(mut self, destination: Locality) -> Self { + self.publisher = self.publisher.allowed_destination(destination); self } } @@ -142,36 +251,40 @@ impl Resolvable for PutBuilder<'_, '_> { type To = ZResult<()>; } +impl Resolvable for DeleteBuilder<'_, '_> { + type To = ZResult<()>; +} + impl SyncResolve for PutBuilder<'_, '_> { #[inline] fn res_sync(self) -> ::To { - let PublisherBuilder { - session, - key_expr, - congestion_control, - priority, - is_express, - destination, - } = self.publisher; - - let publisher = Publisher { - session, - #[cfg(feature = "unstable")] - eid: 0, // This is a one shot Publisher - key_expr: key_expr?, - congestion_control, - priority, - is_express, - destination, - }; - + let publisher = self.publisher.one_time_res_sync()?; resolve_put( &publisher, self.payload, - self.kind, + SampleKind::Put, self.encoding, + self.timestamp, + #[cfg(feature = "unstable")] + self.source_info, + #[cfg(feature = "unstable")] + self.attachment, + ) + } +} + +impl SyncResolve for DeleteBuilder<'_, '_> { + #[inline] + fn res_sync(self) -> ::To { + let publisher = self.publisher.one_time_res_sync()?; + resolve_put( + &publisher, + Payload::empty(), + SampleKind::Delete, + Encoding::ZENOH_BYTES, + self.timestamp, #[cfg(feature = "unstable")] - None, + self.source_info, #[cfg(feature = "unstable")] self.attachment, ) @@ -186,6 +299,14 @@ impl AsyncResolve for PutBuilder<'_, '_> { } } +impl AsyncResolve for DeleteBuilder<'_, '_> { + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} + use futures::Sink; use std::convert::TryFrom; use std::convert::TryInto; @@ -293,25 +414,22 @@ impl<'a> Publisher<'a> { /// Change the `congestion_control` to apply when routing the data. #[inline] - pub fn congestion_control(mut self, congestion_control: CongestionControl) -> Self { + pub fn set_congestion_control(&mut self, congestion_control: CongestionControl) { self.congestion_control = congestion_control; - self } /// Change the priority of the written data. 
#[inline] - pub fn priority(mut self, priority: Priority) -> Self { + pub fn set_priority(&mut self, priority: Priority) { self.priority = priority; - self } /// Restrict the matching subscribers that will receive the published data /// to the ones that have the given [`Locality`](crate::prelude::Locality). #[zenoh_macros::unstable] #[inline] - pub fn allowed_destination(mut self, destination: Locality) -> Self { + pub fn set_allowed_destination(&mut self, destination: Locality) { self.destination = destination; - self } /// Consumes the given `Publisher`, returning a thread-safe reference-counting @@ -355,6 +473,7 @@ impl<'a> Publisher<'a> { payload, kind, encoding: Encoding::ZENOH_BYTES, + timestamp: None, #[cfg(feature = "unstable")] source_info: None, #[cfg(feature = "unstable")] @@ -625,6 +744,7 @@ pub struct Publication<'a> { payload: Payload, kind: SampleKind, encoding: Encoding, + timestamp: Option, #[cfg(feature = "unstable")] pub(crate) source_info: Option, #[cfg(feature = "unstable")] @@ -676,6 +796,7 @@ impl SyncResolve for Publication<'_> { self.payload, self.kind, self.encoding, + self.timestamp, #[cfg(feature = "unstable")] self.source_info, #[cfg(feature = "unstable")] @@ -707,6 +828,7 @@ impl<'a> Sink for Publisher<'a> { payload: item.payload, kind: item.kind, encoding: item.encoding, + timestamp: None, #[cfg(feature = "unstable")] source_info: None, #[cfg(feature = "unstable")] @@ -770,30 +892,32 @@ impl<'a, 'b> Clone for PublisherBuilder<'a, 'b> { } } -impl<'a, 'b> PublisherBuilder<'a, 'b> { +impl QoSBuilderTrait for PublisherBuilder<'_, '_> { /// Change the `congestion_control` to apply when routing the data. #[inline] - pub fn congestion_control(mut self, congestion_control: CongestionControl) -> Self { - self.congestion_control = congestion_control; - self + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + Self { + congestion_control, + ..self + } } /// Change the priority of the written data. #[inline] - pub fn priority(mut self, priority: Priority) -> Self { - self.priority = priority; - self + fn priority(self, priority: Priority) -> Self { + Self { priority, ..self } } /// Change the `express` policy to apply when routing the data. /// When express is set to `true`, then the message will not be batched. /// This usually has a positive impact on latency but negative impact on throughput. #[inline] - pub fn express(mut self, is_express: bool) -> Self { - self.is_express = is_express; - self + fn express(self, is_express: bool) -> Self { + Self { is_express, ..self } } +} +impl<'a, 'b> PublisherBuilder<'a, 'b> { /// Restrict the matching subscribers that will receive the published data /// to the ones that have the given [`Locality`](crate::prelude::Locality). 
#[zenoh_macros::unstable] @@ -802,6 +926,20 @@ impl<'a, 'b> PublisherBuilder<'a, 'b> { self.destination = destination; self } + + // internal function for `PutBuilder` and `DeleteBuilder` + fn one_time_res_sync(self) -> ZResult> { + Ok(Publisher { + session: self.session, + #[cfg(feature = "unstable")] + eid: 0, // This is a one shot Publisher + key_expr: self.key_expr?, + congestion_control: self.congestion_control, + priority: self.priority, + is_express: self.is_express, + destination: self.destination, + }) + } } impl<'a, 'b> Resolvable for PublisherBuilder<'a, 'b> { @@ -874,6 +1012,7 @@ fn resolve_put( payload: Payload, kind: SampleKind, encoding: Encoding, + timestamp: Option, #[cfg(feature = "unstable")] source_info: Option, #[cfg(feature = "unstable")] attachment: Option, ) -> ZResult<()> { @@ -883,8 +1022,11 @@ fn resolve_put( .as_ref() .unwrap() .clone(); - let timestamp = publisher.session.runtime.new_timestamp(); - + let timestamp = if timestamp.is_none() { + publisher.session.runtime.new_timestamp() + } else { + timestamp + }; if publisher.destination != Locality::SessionLocal { primitives.send_push(Push { wire_expr: publisher.key_expr.to_wire(&publisher.session).to_owned(), diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index 0df98773fc..1710cbc85b 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -40,17 +40,23 @@ pub trait QoSBuilderTrait { } pub trait SampleBuilderTrait { + /// Sets of clears timestamp fn with_timestamp_opt(self, timestamp: Option) -> Self; + /// Sets timestamp fn with_timestamp(self, timestamp: Timestamp) -> Self; + /// Attach source information #[zenoh_macros::unstable] fn with_source_info(self, source_info: SourceInfo) -> Self; + /// Attach or remove user-provided data in key-value format #[zenoh_macros::unstable] fn with_attachment_opt(self, attachment: Option) -> Self; + /// Attach user-provided data in key-value format #[zenoh_macros::unstable] fn with_attachment(self, attachment: Attachment) -> Self; } pub trait PutSampleBuilderTrait: SampleBuilderTrait { + /// Set the [`Encoding`] fn with_encoding(self, encoding: Encoding) -> Self; fn with_payload(self, payload: IntoPayload) -> Self where diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 89c18ec4a8..e26bdeadaf 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -706,10 +706,12 @@ impl Session { PutBuilder { publisher: self.declare_publisher(key_expr), payload: payload.into(), - kind: SampleKind::Put, + timestamp: None, encoding: Encoding::default(), #[cfg(feature = "unstable")] attachment: None, + #[cfg(feature = "unstable")] + source_info: None, } } @@ -737,13 +739,13 @@ impl Session { TryIntoKeyExpr: TryInto>, >>::Error: Into, { - PutBuilder { + DeleteBuilder { publisher: self.declare_publisher(key_expr), - payload: Payload::empty(), - kind: SampleKind::Delete, - encoding: Encoding::default(), + timestamp: None, #[cfg(feature = "unstable")] attachment: None, + #[cfg(feature = "unstable")] + source_info: None, } } /// Query data from the matching queryables in the system. 
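// [Editorial sketch, not part of the patch] After the builder split above, the
// configuration methods live on traits, so user code imports them explicitly,
// which is exactly what the test and example hunks below add. A minimal
// publication with the reworked builders; the key expression and payload are
// illustrative, and `Encoding`/`Session` are assumed to come from the sync
// prelude as in the examples touched by this patch:
use zenoh::prelude::sync::*;
use zenoh::publication::CongestionControl;
use zenoh::sample_builder::{PutSampleBuilderTrait, QoSBuilderTrait};

fn publish_once(session: &Session) -> zenoh::Result<()> {
    session
        .put("demo/example", "payload")
        .with_encoding(Encoding::default()) // PutSampleBuilderTrait
        .congestion_control(CongestionControl::Block) // QoSBuilderTrait
        .express(true)
        .res()?;
    // `delete` now resolves through its own DeleteBuilder instead of reusing PutBuilder.
    session.delete("demo/example").res()?;
    Ok(())
}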
diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 1a9df306b2..8dc39423cb 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -15,6 +15,7 @@ use async_std::prelude::FutureExt; use async_std::task; use std::time::Duration; use zenoh::prelude::r#async::*; +use zenoh::sample_builder::QoSBuilderTrait; use zenoh::{publication::Priority, SessionDeclarations}; use zenoh_core::zasync_executor_init; diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 06a8f5da45..123550852e 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -20,6 +20,7 @@ use std::sync::{atomic::AtomicUsize, Arc}; use std::time::Duration; use zenoh::config::{Config, ModeDependentValue}; use zenoh::prelude::r#async::*; +use zenoh::sample_builder::QoSBuilderTrait; use zenoh::{value::Value, Result}; use zenoh_core::zasync_executor_init; use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher}; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index e3f5e2df63..955ec7a73f 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -17,6 +17,7 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; use zenoh::prelude::r#async::*; +use zenoh::sample_builder::QoSBuilderTrait; use zenoh_core::zasync_executor_init; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index 8eb007b0c0..3d1327398d 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -17,6 +17,7 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; use zenoh::prelude::r#async::*; +use zenoh::sample_builder::QoSBuilderTrait; use zenoh_core::zasync_executor_init; const TIMEOUT: Duration = Duration::from_secs(60); From 4e14cf9e24f5bc7ba2cde3e1494f398d58ed1415 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 26 Mar 2024 01:49:14 +0100 Subject: [PATCH 049/357] build fixes --- examples/examples/z_ping.rs | 1 + examples/examples/z_pong.rs | 1 + examples/examples/z_pub_thr.rs | 1 + zenoh/tests/attachments.rs | 2 +- 4 files changed, 4 insertions(+), 1 deletion(-) diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index a57c937e48..b40afc1f53 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -16,6 +16,7 @@ use std::time::{Duration, Instant}; use zenoh::config::Config; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; +use zenoh::sample_builder::QoSBuilderTrait; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index baa5683f62..0003958b5d 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -15,6 +15,7 @@ use clap::Parser; use zenoh::config::Config; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; +use zenoh::sample_builder::QoSBuilderTrait; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 4354ad2e68..7e7c1ac9b5 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -16,6 +16,7 @@ use clap::Parser; use std::convert::TryInto; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; +use zenoh::sample_builder::QoSBuilderTrait; use zenoh_examples::CommonArgs; fn main() { diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 8d26cc0344..04ed28b761 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -1,7 +1,7 @@ #[cfg(feature = 
"unstable")] #[test] fn pubsub() { - use zenoh::prelude::sync::*; + use zenoh::{prelude::sync::*, sample_builder::SampleBuilderTrait}; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh From c7cc5758138ba6e9fea380acf1605cee2d650624 Mon Sep 17 00:00:00 2001 From: "ChenYing Kuo (CY)" Date: Tue, 26 Mar 2024 19:46:29 +0800 Subject: [PATCH 050/357] Support RingBuffer to get the latest sample. (#851) * Add RingQueue to support getting the latest sample. Signed-off-by: ChenYing Kuo * Rename RingQueue to RingBuffer. Signed-off-by: ChenYing Kuo * Update examples. Signed-off-by: ChenYing Kuo * Add document. Signed-off-by: ChenYing Kuo * Add test for RingBuffer. Signed-off-by: ChenYing Kuo * Use the correct naming convention (CameCase) Signed-off-by: ChenYing Kuo * Add file header. Signed-off-by: ChenYing Kuo * gename z_pull and update the usage. Signed-off-by: ChenYing Kuo * Use ring instead of cache. Signed-off-by: ChenYing Kuo * Add sleep to wait for the result in pubsub_with_ringbuffer. Signed-off-by: ChenYing Kuo --------- Signed-off-by: ChenYing Kuo --- examples/README.md | 4 +- examples/examples/z_pull.rs | 38 +++++++----------- zenoh/src/handlers.rs | 52 ++++++++++++++++++++++++ zenoh/tests/attachments.rs | 13 ++++++ zenoh/tests/formatters.rs | 13 ++++++ zenoh/tests/handler.rs | 80 +++++++++++++++++++++++++++++++++++++ zenoh/tests/interceptors.rs | 13 ++++++ 7 files changed, 188 insertions(+), 25 deletions(-) create mode 100644 zenoh/tests/handler.rs diff --git a/examples/README.md b/examples/README.md index 8e5b3085ba..dab1c99911 100644 --- a/examples/README.md +++ b/examples/README.md @@ -80,7 +80,7 @@ ### z_pull Declares a key expression and a pull subscriber. - On each pull, the pull subscriber will be notified of the last `put` or `delete` made on each key expression matching the subscriber key expression, and will print this notification. + On each pull, the pull subscriber will be notified of the last N `put` or `delete` made on each key expression matching the subscriber key expression, and will print this notification. 
Typical usage: @@ -89,7 +89,7 @@ ``` or ```bash - z_pull -k demo/** + z_pull -k demo/** --size 3 ``` ### z_get diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index d2c9a5380b..9d64b7b758 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -13,12 +13,8 @@ // use async_std::task::sleep; use clap::Parser; -use std::{ - sync::{Arc, Mutex}, - time::Duration, -}; -use zenoh::{config::Config, prelude::r#async::*}; -use zenoh_collections::RingBuffer; +use std::time::Duration; +use zenoh::{config::Config, handlers::RingBuffer, prelude::r#async::*}; use zenoh_examples::CommonArgs; #[async_std::main] @@ -26,31 +22,24 @@ async fn main() { // initiate logging env_logger::init(); - let (config, key_expr, cache, interval) = parse_args(); + let (config, key_expr, size, interval) = parse_args(); println!("Opening session..."); let session = zenoh::open(config).res().await.unwrap(); - println!("Creating a local queue keeping the last {cache} elements..."); - let arb = Arc::new(Mutex::new(RingBuffer::new(cache))); - let arb_c = arb.clone(); - println!("Declaring Subscriber on '{key_expr}'..."); - let _subscriber = session + let subscriber = session .declare_subscriber(&key_expr) - .callback(move |sample| { - arb_c.lock().unwrap().push_force(sample); - }) + .with(RingBuffer::new(size)) .res() .await .unwrap(); println!("Pulling data every {:#?} seconds", interval); loop { - let mut res = arb.lock().unwrap().pull(); print!(">> [Subscriber] Pulling "); - match res.take() { - Some(sample) => { + match subscriber.recv() { + Ok(Some(sample)) => { let payload = sample .payload() .deserialize::() @@ -62,10 +51,13 @@ async fn main() { payload, ); } - None => { + Ok(None) => { println!("nothing... sleep for {:#?}", interval); sleep(interval).await; } + Err(e) => { + println!("Pull error: {e}"); + } } } } @@ -75,10 +67,10 @@ struct SubArgs { #[arg(short, long, default_value = "demo/example/**")] /// The Key Expression to subscribe to. key: KeyExpr<'static>, - /// The size of the cache. + /// The size of the ringbuffer. #[arg(long, default_value = "3")] - cache: usize, - /// The interval for pulling the cache. + size: usize, + /// The interval for pulling the ringbuffer. #[arg(long, default_value = "5.0")] interval: f32, #[command(flatten)] @@ -88,5 +80,5 @@ struct SubArgs { fn parse_args() -> (Config, KeyExpr<'static>, usize, Duration) { let args = SubArgs::parse(); let interval = Duration::from_secs_f32(args.interval); - (args.common.into(), args.key, args.cache, interval) + (args.common.into(), args.key, args.size, interval) } diff --git a/zenoh/src/handlers.rs b/zenoh/src/handlers.rs index e5ec3bb0dc..c5d2c6bb90 100644 --- a/zenoh/src/handlers.rs +++ b/zenoh/src/handlers.rs @@ -15,6 +15,10 @@ //! Callback handler trait. use crate::API_DATA_RECEPTION_CHANNEL_SIZE; +use std::sync::{Arc, Mutex, Weak}; +use zenoh_collections::RingBuffer as RingBufferInner; +use zenoh_result::ZResult; + /// An alias for `Arc`. pub type Dyn = std::sync::Arc; @@ -88,6 +92,54 @@ impl IntoHandler<'static, T> } } +/// Ring buffer with a limited queue size, which allows users to keep the last N data. +pub struct RingBuffer { + ring: Arc>>, +} + +impl RingBuffer { + /// Initialize the RingBuffer with the capacity size. 
+ pub fn new(capacity: usize) -> Self { + RingBuffer { + ring: Arc::new(Mutex::new(RingBufferInner::new(capacity))), + } + } +} + +pub struct RingBufferHandler { + ring: Weak>>, +} + +impl RingBufferHandler { + pub fn recv(&self) -> ZResult> { + let Some(ring) = self.ring.upgrade() else { + bail!("The ringbuffer has been deleted."); + }; + let mut guard = ring.lock().map_err(|e| zerror!("{}", e))?; + Ok(guard.pull()) + } +} + +impl IntoHandler<'static, T> for RingBuffer { + type Handler = RingBufferHandler; + + fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { + let receiver = RingBufferHandler { + ring: Arc::downgrade(&self.ring), + }; + ( + Dyn::new(move |t| match self.ring.lock() { + Ok(mut g) => { + // Eventually drop the oldest element. + g.push_force(t); + } + Err(e) => log::error!("{}", e), + }), + receiver, + ) + } +} + /// A function that can transform a [`FnMut`]`(T)` to /// a [`Fn`]`(T)` with the help of a [`Mutex`](std::sync::Mutex). pub fn locked(fnmut: impl FnMut(T)) -> impl Fn(T) { diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 38d03b0a84..603939bc0e 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -1,3 +1,16 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// #[cfg(feature = "unstable")] #[test] fn pubsub() { diff --git a/zenoh/tests/formatters.rs b/zenoh/tests/formatters.rs index ae894e44b6..22600b6cc0 100644 --- a/zenoh/tests/formatters.rs +++ b/zenoh/tests/formatters.rs @@ -1,3 +1,16 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// #[test] fn reuse() { zenoh::kedefine!( diff --git a/zenoh/tests/handler.rs b/zenoh/tests/handler.rs new file mode 100644 index 0000000000..c1e912fc75 --- /dev/null +++ b/zenoh/tests/handler.rs @@ -0,0 +1,80 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +#[test] +fn pubsub_with_ringbuffer() { + use std::{thread, time::Duration}; + use zenoh::{handlers::RingBuffer, prelude::sync::*}; + + let zenoh = zenoh::open(Config::default()).res().unwrap(); + let sub = zenoh + .declare_subscriber("test/ringbuffer") + .with(RingBuffer::new(3)) + .res() + .unwrap(); + for i in 0..10 { + zenoh + .put("test/ringbuffer", format!("put{i}")) + .res() + .unwrap(); + } + // Should only receive the last three samples ("put7", "put8", "put9") + for i in 7..10 { + assert_eq!( + sub.recv() + .unwrap() + .unwrap() + .payload() + .deserialize::() + .unwrap(), + format!("put{i}") + ); + } + // Wait for the subscriber to get the value + thread::sleep(Duration::from_millis(1000)); +} + +#[test] +fn query_with_ringbuffer() { + use zenoh::{handlers::RingBuffer, prelude::sync::*}; + + let zenoh = zenoh::open(Config::default()).res().unwrap(); + let queryable = zenoh + .declare_queryable("test/ringbuffer_query") + .with(RingBuffer::new(1)) + .res() + .unwrap(); + + let _reply1 = zenoh + .get("test/ringbuffer_query") + .with_value("query1") + .res() + .unwrap(); + let _reply2 = zenoh + .get("test/ringbuffer_query") + .with_value("query2") + .res() + .unwrap(); + + let query = queryable.recv().unwrap().unwrap(); + // Only receive the latest query + assert_eq!( + query + .value() + .unwrap() + .payload + .deserialize::() + .unwrap(), + "query2" + ); +} diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index 1f502138e4..bf7ec3d7eb 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -1,3 +1,16 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// use std::sync::{Arc, Mutex}; use zenoh_core::zlock; From fa4b98d0a791d16b9f7c19865aeee4d08ced1766 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 26 Mar 2024 13:33:44 +0100 Subject: [PATCH 051/357] Remove unmantained complete_n feature (#862) * Remove unmantained complete_n feature * Refined QueryableInfo message format * Remove useless bitflag --- commons/zenoh-codec/Cargo.toml | 1 - commons/zenoh-codec/src/network/declare.rs | 53 +++++- commons/zenoh-codec/src/network/request.rs | 5 - commons/zenoh-protocol/Cargo.toml | 1 - commons/zenoh-protocol/src/network/declare.rs | 49 ++--- commons/zenoh-protocol/src/network/request.rs | 4 - zenoh/Cargo.toml | 1 - zenoh/src/lib.rs | 1 - zenoh/src/net/routing/dispatcher/queries.rs | 178 +++++------------- zenoh/src/net/routing/dispatcher/resource.rs | 9 +- zenoh/src/net/routing/hat/client/mod.rs | 6 +- zenoh/src/net/routing/hat/client/queries.rs | 32 ++-- .../src/net/routing/hat/linkstate_peer/mod.rs | 6 +- .../net/routing/hat/linkstate_peer/queries.rs | 49 ++--- zenoh/src/net/routing/hat/mod.rs | 4 +- zenoh/src/net/routing/hat/p2p_peer/mod.rs | 4 +- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 32 ++-- zenoh/src/net/routing/hat/router/mod.rs | 8 +- zenoh/src/net/routing/hat/router/queries.rs | 60 +++--- zenoh/src/net/runtime/adminspace.rs | 7 +- zenoh/src/session.rs | 6 +- zenohd/src/main.rs | 2 - 22 files changed, 194 insertions(+), 324 deletions(-) diff --git a/commons/zenoh-codec/Cargo.toml b/commons/zenoh-codec/Cargo.toml index 72f507a596..5b7b8de6ed 100644 --- a/commons/zenoh-codec/Cargo.toml +++ b/commons/zenoh-codec/Cargo.toml @@ -41,7 +41,6 @@ shared-memory = [ "zenoh-shm", "zenoh-protocol/shared-memory" ] -complete_n = ["zenoh-protocol/complete_n"] [dependencies] log = { workspace = true, optional = true } diff --git a/commons/zenoh-codec/src/network/declare.rs b/commons/zenoh-codec/src/network/declare.rs index bcc55ed62b..c81514ab3e 100644 --- a/commons/zenoh-codec/src/network/declare.rs +++ b/commons/zenoh-codec/src/network/declare.rs @@ -511,7 +511,46 @@ where } // QueryableInfo -crate::impl_zextz64!(queryable::ext::QueryableInfo, queryable::ext::Info::ID); +impl WCodec<(&queryable::ext::QueryableInfoType, bool), &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + fn write(self, writer: &mut W, x: (&queryable::ext::QueryableInfoType, bool)) -> Self::Output { + let (x, more) = x; + + let mut flags: u8 = 0; + if x.complete { + flags |= queryable::ext::flag::C; + } + let v: u64 = (flags as u64) | ((x.distance as u64) << 8); + let ext = queryable::ext::QueryableInfo::new(v); + + self.write(&mut *writer, (&ext, more)) + } +} + +impl RCodec<(queryable::ext::QueryableInfoType, bool), &mut R> for Zenoh080Header +where + R: Reader, +{ + type Error = DidntRead; + + fn read( + self, + reader: &mut R, + ) -> Result<(queryable::ext::QueryableInfoType, bool), Self::Error> { + let (ext, more): (queryable::ext::QueryableInfo, bool) = self.read(&mut *reader)?; + + let complete = imsg::has_flag(ext.value as u8, queryable::ext::flag::C); + let distance = (ext.value >> 8) as u16; + + Ok(( + queryable::ext::QueryableInfoType { complete, distance }, + more, + )) + } +} // DeclareQueryable impl WCodec<&queryable::DeclareQueryable, &mut W> for Zenoh080 @@ -529,7 +568,7 @@ where // Header let mut header = declare::id::D_QUERYABLE; - let mut n_exts = (ext_info != &queryable::ext::QueryableInfo::DEFAULT) as u8; + 
let mut n_exts = (ext_info != &queryable::ext::QueryableInfoType::DEFAULT) as u8; if n_exts != 0 { header |= subscriber::flag::Z; } @@ -544,9 +583,9 @@ where // Body self.write(&mut *writer, id)?; self.write(&mut *writer, wire_expr)?; - if ext_info != &queryable::ext::QueryableInfo::DEFAULT { + if ext_info != &queryable::ext::QueryableInfoType::DEFAULT { n_exts -= 1; - self.write(&mut *writer, (*ext_info, n_exts != 0))?; + self.write(&mut *writer, (ext_info, n_exts != 0))?; } Ok(()) @@ -589,15 +628,15 @@ where }; // Extensions - let mut ext_info = queryable::ext::QueryableInfo::DEFAULT; + let mut ext_info = queryable::ext::QueryableInfoType::DEFAULT; let mut has_ext = imsg::has_flag(self.header, queryable::flag::Z); while has_ext { let ext: u8 = self.codec.read(&mut *reader)?; let eodec = Zenoh080Header::new(ext); match iext::eid(ext) { - queryable::ext::Info::ID => { - let (i, ext): (queryable::ext::QueryableInfo, bool) = + queryable::ext::QueryableInfo::ID => { + let (i, ext): (queryable::ext::QueryableInfoType, bool) = eodec.read(&mut *reader)?; ext_info = i; has_ext = ext; diff --git a/commons/zenoh-codec/src/network/request.rs b/commons/zenoh-codec/src/network/request.rs index 364c1af3d0..6173840d7e 100644 --- a/commons/zenoh-codec/src/network/request.rs +++ b/commons/zenoh-codec/src/network/request.rs @@ -43,8 +43,6 @@ where ext::TargetType::BestMatching => 0, ext::TargetType::All => 1, ext::TargetType::AllComplete => 2, - #[cfg(feature = "complete_n")] - ext::TargetType::Complete(n) => 3 + *n, }; let ext = ext::Target::new(v); self.write(&mut *writer, (&ext, more)) @@ -63,9 +61,6 @@ where 0 => ext::TargetType::BestMatching, 1 => ext::TargetType::All, 2 => ext::TargetType::AllComplete, - #[cfg(feature = "complete_n")] - n => ext::TargetType::Complete(n - 3), - #[cfg(not(feature = "complete_n"))] _ => return Err(DidntRead), }; Ok((rt, more)) diff --git a/commons/zenoh-protocol/Cargo.toml b/commons/zenoh-protocol/Cargo.toml index 93c92ee33f..9d7e35d690 100644 --- a/commons/zenoh-protocol/Cargo.toml +++ b/commons/zenoh-protocol/Cargo.toml @@ -36,7 +36,6 @@ std = [ test = ["rand", "zenoh-buffers/test"] shared-memory = ["std", "zenoh-buffers/shared-memory"] stats = [] -complete_n = [] [dependencies] const_format = { workspace = true } diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index 187fa87662..d41d8bf67f 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -464,31 +464,35 @@ pub mod queryable { pub struct DeclareQueryable { pub id: QueryableId, pub wire_expr: WireExpr<'static>, - pub ext_info: ext::QueryableInfo, + pub ext_info: ext::QueryableInfoType, } pub mod ext { use super::*; - pub type Info = zextz64!(0x01, false); + pub type QueryableInfo = zextz64!(0x01, false); + pub mod flag { + pub const C: u8 = 1; // 0x01 Complete if C==1 then the queryable is complete + } + /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// |Z|0_1| ID | /// +-+-+-+---------+ - /// ~ complete_n ~ + /// |x|x|x|x|x|x|x|C| /// +---------------+ - /// ~ distance ~ + /// ~ distance ~ /// +---------------+ #[derive(Debug, Clone, Copy, PartialEq, Eq)] - pub struct QueryableInfo { - pub complete: u8, // Default 0: incomplete // @TODO: maybe a bitflag - pub distance: u32, // Default 0: no distance + pub struct QueryableInfoType { + pub complete: bool, // Default false: incomplete + pub distance: u16, // Default 0: no distance } - impl QueryableInfo { + impl QueryableInfoType { pub const DEFAULT: 
Self = Self { - complete: 0, + complete: false, distance: 0, }; @@ -496,35 +500,18 @@ pub mod queryable { pub fn rand() -> Self { use rand::Rng; let mut rng = rand::thread_rng(); - let complete: u8 = rng.gen(); - let distance: u32 = rng.gen(); + let complete: bool = rng.gen_bool(0.5); + let distance: u16 = rng.gen(); Self { complete, distance } } } - impl Default for QueryableInfo { + impl Default for QueryableInfoType { fn default() -> Self { Self::DEFAULT } } - - impl From for QueryableInfo { - fn from(ext: Info) -> Self { - let complete = ext.value as u8; - let distance = (ext.value >> 8) as u32; - - Self { complete, distance } - } - } - - impl From for Info { - fn from(ext: QueryableInfo) -> Self { - let mut v: u64 = ext.complete as u64; - v |= (ext.distance as u64) << 8; - Info::new(v) - } - } } impl DeclareQueryable { @@ -535,7 +522,7 @@ pub mod queryable { let id: QueryableId = rng.gen(); let wire_expr = WireExpr::rand(); - let ext_info = ext::QueryableInfo::rand(); + let ext_info = ext::QueryableInfoType::rand(); Self { id, @@ -553,7 +540,7 @@ pub mod queryable { /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ - /// |Z|X|X| U_QBL | + /// |Z|0_2| U_QBL | /// +---------------+ /// ~ qbls_id:z32 ~ /// +---------------+ diff --git a/commons/zenoh-protocol/src/network/request.rs b/commons/zenoh-protocol/src/network/request.rs index aba6bb057a..ff978744e8 100644 --- a/commons/zenoh-protocol/src/network/request.rs +++ b/commons/zenoh-protocol/src/network/request.rs @@ -93,8 +93,6 @@ pub mod ext { BestMatching, All, AllComplete, - #[cfg(feature = "complete_n")] - Complete(u64), } impl TargetType { @@ -109,8 +107,6 @@ pub mod ext { TargetType::All, TargetType::AllComplete, TargetType::BestMatching, - #[cfg(feature = "complete_n")] - TargetType::Complete(rng.gen()), ] .choose(&mut rng) .unwrap() diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index e6f7a4d9aa..1333ea6a57 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -31,7 +31,6 @@ maintenance = { status = "actively-developed" } [features] auth_pubkey = ["zenoh-transport/auth_pubkey"] auth_usrpwd = ["zenoh-transport/auth_usrpwd"] -complete_n = ["zenoh-codec/complete_n"] shared-memory = [ "zenoh-shm", "zenoh-protocol/shared-memory", diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index eb1ba1bcd1..ed2f01f180 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -106,7 +106,6 @@ pub const FEATURES: &str = concat_enabled_features!( features = [ "auth_pubkey", "auth_usrpwd", - "complete_n", "shared-memory", "stats", "transport_multilink", diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 04262e555d..753a4003e1 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -24,7 +24,7 @@ use zenoh_config::WhatAmI; use zenoh_protocol::{ core::{key_expr::keyexpr, Encoding, WireExpr}, network::{ - declare::{ext, queryable::ext::QueryableInfo, QueryableId}, + declare::{ext, queryable::ext::QueryableInfoType, QueryableId}, request::{ext::TargetType, Request, RequestId}, response::{self, ext::ResponderIdType, Response, ResponseFinal}, }, @@ -44,7 +44,7 @@ pub(crate) fn declare_queryable( face: &mut Arc, id: QueryableId, expr: &WireExpr, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, node_id: NodeId, ) { let rtables = zread!(tables.tables); @@ -287,22 +287,11 @@ fn compute_final_route( .hat_code .egress_filter(tables, src_face, &qabl.direction.0, expr) { - #[cfg(feature = "complete_n")] - { - 
route.entry(qabl.direction.0.id).or_insert_with(|| { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query.clone()); - (direction, qid, *target) - }); - } - #[cfg(not(feature = "complete_n"))] - { - route.entry(qabl.direction.0.id).or_insert_with(|| { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query.clone()); - (direction, qid) - }); - } + route.entry(qabl.direction.0.id).or_insert_with(|| { + let mut direction = qabl.direction.clone(); + let qid = insert_pending_query(&mut direction.0, query.clone()); + (direction, qid) + }); } } route @@ -315,46 +304,11 @@ fn compute_final_route( .hat_code .egress_filter(tables, src_face, &qabl.direction.0, expr) { - #[cfg(feature = "complete_n")] - { - route.entry(qabl.direction.0.id).or_insert_with(|| { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query.clone()); - (direction, qid, *target) - }); - } - #[cfg(not(feature = "complete_n"))] - { - route.entry(qabl.direction.0.id).or_insert_with(|| { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query.clone()); - (direction, qid) - }); - } - } - } - route - } - #[cfg(feature = "complete_n")] - TargetType::Complete(n) => { - let mut route = HashMap::new(); - let mut remaining = *n; - for qabl in qabls.iter() { - if qabl.complete > 0 - && tables - .hat_code - .egress_filter(tables, src_face, &qabl.direction.0, expr) - { - let nb = std::cmp::min(qabl.complete, remaining); route.entry(qabl.direction.0.id).or_insert_with(|| { let mut direction = qabl.direction.clone(); let qid = insert_pending_query(&mut direction.0, query.clone()); - (direction, qid, TargetType::Complete(nb)) + (direction, qid) }); - remaining -= nb; - if remaining == 0 { - break; - } } } route @@ -365,18 +319,11 @@ fn compute_final_route( .find(|qabl| qabl.direction.0.id != src_face.id && qabl.complete > 0) { let mut route = HashMap::new(); - #[cfg(feature = "complete_n")] - { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query); - route.insert(direction.0.id, (direction, qid, *target)); - } - #[cfg(not(feature = "complete_n"))] - { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query); - route.insert(direction.0.id, (direction, qid)); - } + + let mut direction = qabl.direction.clone(); + let qid = insert_pending_query(&mut direction.0, query); + route.insert(direction.0.id, (direction, qid)); + route } else { compute_final_route(tables, qabls, src_face, expr, &TargetType::All, query) @@ -624,78 +571,37 @@ pub fn route_query( expr.full_expr().to_string(), )); } else { - // let timer = tables.timer.clone(); - // let timeout = tables.queries_default_timeout; - #[cfg(feature = "complete_n")] - { - for ((outface, key_expr, context), qid, t) in route.values() { - // timer.add(TimedEvent::once( - // Instant::now() + timeout, - // QueryCleanup { - // tables: tables_ref.clone(), - // face: Arc::downgrade(&outface), - // *qid, - // }, - // )); - #[cfg(feature = "stats")] - if !admin { - inc_req_stats!(outface, tx, user, body) - } else { - inc_req_stats!(outface, tx, admin, body) - } - - log::trace!("Propagate query {}:{} to {}", face, qid, outface); - outface.primitives.send_request(RoutingContext::with_expr( - Request { - id: *qid, - wire_expr: key_expr.into(), - ext_qos: ext::QoSType::REQUEST, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { 
node_id: *context }, - ext_target: *t, - ext_budget: None, - ext_timeout: None, - payload: body.clone(), - }, - expr.full_expr().to_string(), - )); + for ((outface, key_expr, context), qid) in route.values() { + // timer.add(TimedEvent::once( + // Instant::now() + timeout, + // QueryCleanup { + // tables: tables_ref.clone(), + // face: Arc::downgrade(&outface), + // *qid, + // }, + // )); + #[cfg(feature = "stats")] + if !admin { + inc_req_stats!(outface, tx, user, body) + } else { + inc_req_stats!(outface, tx, admin, body) } - } - #[cfg(not(feature = "complete_n"))] - { - for ((outface, key_expr, context), qid) in route.values() { - // timer.add(TimedEvent::once( - // Instant::now() + timeout, - // QueryCleanup { - // tables: tables_ref.clone(), - // face: Arc::downgrade(&outface), - // *qid, - // }, - // )); - #[cfg(feature = "stats")] - if !admin { - inc_req_stats!(outface, tx, user, body) - } else { - inc_req_stats!(outface, tx, admin, body) - } - - log::trace!("Propagate query {}:{} to {}", face, qid, outface); - outface.primitives.send_request(RoutingContext::with_expr( - Request { - id: *qid, - wire_expr: key_expr.into(), - ext_qos: ext::QoSType::REQUEST, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { node_id: *context }, - ext_target: target, - ext_budget: None, - ext_timeout: None, - payload: body.clone(), - }, - expr.full_expr().to_string(), - )); - } + log::trace!("Propagate query {}:{} to {}", face, qid, outface); + outface.primitives.send_request(RoutingContext::with_expr( + Request { + id: *qid, + wire_expr: key_expr.into(), + ext_qos: ext::QoSType::REQUEST, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { node_id: *context }, + ext_target: target, + ext_budget: None, + ext_timeout: None, + payload: body.clone(), + }, + expr.full_expr().to_string(), + )); } } } else { diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 3e35db14b6..0450dab38a 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -21,14 +21,12 @@ use std::convert::TryInto; use std::hash::{Hash, Hasher}; use std::sync::{Arc, Weak}; use zenoh_config::WhatAmI; -#[cfg(feature = "complete_n")] -use zenoh_protocol::network::request::ext::TargetType; use zenoh_protocol::network::RequestId; use zenoh_protocol::{ core::{key_expr::keyexpr, ExprId, WireExpr}, network::{ declare::{ - ext, queryable::ext::QueryableInfo, subscriber::ext::SubscriberInfo, Declare, + ext, queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, }, Mapping, @@ -40,9 +38,6 @@ pub(crate) type NodeId = u16; pub(crate) type Direction = (Arc, WireExpr<'static>, NodeId); pub(crate) type Route = HashMap; -#[cfg(feature = "complete_n")] -pub(crate) type QueryRoute = HashMap; -#[cfg(not(feature = "complete_n"))] pub(crate) type QueryRoute = HashMap; pub(crate) struct QueryTargetQabl { pub(crate) direction: Direction, @@ -56,7 +51,7 @@ pub(crate) struct SessionContext { pub(crate) local_expr_id: Option, pub(crate) remote_expr_id: Option, pub(crate) subs: Option, - pub(crate) qabl: Option, + pub(crate) qabl: Option, pub(crate) in_interceptor_cache: Option>, pub(crate) e_interceptor_cache: Option>, } diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index a9908f5f58..8b7031152a 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -42,7 +42,9 @@ use std::{ sync::{atomic::AtomicU32, Arc}, }; use 
zenoh_config::WhatAmI; -use zenoh_protocol::network::declare::{queryable::ext::QueryableInfo, QueryableId, SubscriberId}; +use zenoh_protocol::network::declare::{ + queryable::ext::QueryableInfoType, QueryableId, SubscriberId, +}; use zenoh_protocol::network::Oam; use zenoh_result::ZResult; use zenoh_sync::get_mut_unchecked; @@ -284,7 +286,7 @@ struct HatFace { next_id: AtomicU32, // @TODO: manage rollover and uniqueness local_subs: HashMap, SubscriberId>, remote_subs: HashMap>, - local_qabls: HashMap, (QueryableId, QueryableInfo)>, + local_qabls: HashMap, (QueryableId, QueryableInfoType)>, remote_qabls: HashMap>, } diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 81e5ba52d9..2ac3f1b993 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -32,29 +32,24 @@ use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ core::{WhatAmI, WireExpr}, network::declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, + common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, DeclareQueryable, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; -#[cfg(feature = "complete_n")] #[inline] -fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { - this.complete += info.complete; +fn merge_qabl_infos(mut this: QueryableInfoType, info: &QueryableInfoType) -> QueryableInfoType { + this.complete = this.complete || info.complete; this.distance = std::cmp::min(this.distance, info.distance); this } -#[cfg(not(feature = "complete_n"))] -#[inline] -fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { - this.complete = u8::from(this.complete != 0 || info.complete != 0); - this.distance = std::cmp::min(this.distance, info.distance); - this -} - -fn local_qabl_info(_tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { +fn local_qabl_info( + _tables: &Tables, + res: &Arc, + face: &Arc, +) -> QueryableInfoType { res.session_ctxs .values() .fold(None, |accu, ctx| { @@ -71,10 +66,7 @@ fn local_qabl_info(_tables: &Tables, res: &Arc, face: &Arc) accu } }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) + .unwrap_or(QueryableInfoType::DEFAULT) } fn propagate_simple_queryable( @@ -121,7 +113,7 @@ fn register_client_queryable( face: &mut Arc, id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, ) { // Register queryable { @@ -147,7 +139,7 @@ fn declare_client_queryable( face: &mut Arc, id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, ) { register_client_queryable(tables, face, id, res, qabl_info); propagate_simple_queryable(tables, res, Some(face)); @@ -263,7 +255,7 @@ impl HatQueriesTrait for HatCode { face: &mut Arc, id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, _node_id: NodeId, ) { declare_client_queryable(tables, face, id, res, qabl_info); diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index 3c4e2091f0..71c483e7bd 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -51,7 +51,7 @@ use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, Ze use zenoh_protocol::{ common::ZExtBody, network::{ - declare::{queryable::ext::QueryableInfo, QueryableId, 
SubscriberId}, + declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, oam::id::OAM_LINKSTATE, Oam, }, @@ -449,7 +449,7 @@ impl HatBaseTrait for HatCode { struct HatContext { router_subs: HashSet, peer_subs: HashSet, - peer_qabls: HashMap, + peer_qabls: HashMap, } impl HatContext { @@ -467,7 +467,7 @@ struct HatFace { next_id: AtomicU32, // @TODO: manage rollover and uniqueness local_subs: HashMap, SubscriberId>, remote_subs: HashMap>, - local_qabls: HashMap, (QueryableId, QueryableInfo)>, + local_qabls: HashMap, (QueryableId, QueryableInfoType)>, remote_qabls: HashMap>, } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index fa553e5121..9fba744a9c 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -35,29 +35,20 @@ use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ core::{WhatAmI, WireExpr, ZenohId}, network::declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, + common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, DeclareQueryable, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; -#[cfg(feature = "complete_n")] #[inline] -fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { - this.complete += info.complete; +fn merge_qabl_infos(mut this: QueryableInfoType, info: &QueryableInfoType) -> QueryableInfoType { + this.complete = this.complete || info.complete; this.distance = std::cmp::min(this.distance, info.distance); this } -#[cfg(not(feature = "complete_n"))] -#[inline] -fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { - this.complete = u8::from(this.complete != 0 || info.complete != 0); - this.distance = std::cmp::min(this.distance, info.distance); - this -} - -fn local_peer_qabl_info(_tables: &Tables, res: &Arc) -> QueryableInfo { +fn local_peer_qabl_info(_tables: &Tables, res: &Arc) -> QueryableInfoType { res.session_ctxs .values() .fold(None, |accu, ctx| { @@ -70,13 +61,14 @@ fn local_peer_qabl_info(_tables: &Tables, res: &Arc) -> QueryableInfo accu } }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) + .unwrap_or(QueryableInfoType::DEFAULT) } -fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { +fn local_qabl_info( + tables: &Tables, + res: &Arc, + face: &Arc, +) -> QueryableInfoType { let info = if res.context.is_some() { res_hat!(res) .peer_qabls @@ -112,10 +104,7 @@ fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) accu } }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) + .unwrap_or(QueryableInfoType::DEFAULT) } #[inline] @@ -124,7 +113,7 @@ fn send_sourced_queryable_to_net_childs( net: &Network, childs: &[NodeIndex], res: &Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, src_face: Option<&mut Arc>, routing_context: NodeId, ) { @@ -198,7 +187,7 @@ fn propagate_simple_queryable( fn propagate_sourced_queryable( tables: &Tables, res: &Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, src_face: Option<&mut Arc>, source: &ZenohId, ) { @@ -236,7 +225,7 @@ fn register_peer_queryable( tables: &mut Tables, mut face: Option<&mut Arc>, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, peer: ZenohId, ) { let current_info = res_hat!(res).peer_qabls.get(&peer); @@ -261,7 +250,7 @@ fn declare_peer_queryable( tables: &mut 
Tables, face: &mut Arc, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, peer: ZenohId, ) { let face = Some(face); @@ -273,7 +262,7 @@ fn register_client_queryable( face: &mut Arc, id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, ) { // Register queryable { @@ -299,7 +288,7 @@ fn declare_client_queryable( face: &mut Arc, id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, ) { register_client_queryable(tables, face, id, res, qabl_info); let local_details = local_peer_qabl_info(tables, res); @@ -599,7 +588,7 @@ fn insert_target_for_qabls( tables: &Tables, net: &Network, source: NodeId, - qabls: &HashMap, + qabls: &HashMap, complete: bool, ) { if net.trees.len() > source as usize { @@ -645,7 +634,7 @@ impl HatQueriesTrait for HatCode { face: &mut Arc, id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, node_id: NodeId, ) { if face.whatami != WhatAmI::Client { diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index d9feb687f2..70e94ac176 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -32,7 +32,7 @@ use zenoh_protocol::{ core::WireExpr, network::{ declare::{ - queryable::ext::QueryableInfo, subscriber::ext::SubscriberInfo, QueryableId, + queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, QueryableId, SubscriberId, }, Oam, @@ -154,7 +154,7 @@ pub(crate) trait HatQueriesTrait { face: &mut Arc, id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, node_id: NodeId, ); fn undeclare_queryable( diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index 59b39d4284..1d87c2eb23 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -53,7 +53,7 @@ use zenoh_protocol::network::{ }; use zenoh_protocol::{ common::ZExtBody, - network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE}, + network::{declare::queryable::ext::QueryableInfoType, oam::id::OAM_LINKSTATE}, }; use zenoh_result::ZResult; use zenoh_sync::get_mut_unchecked; @@ -360,7 +360,7 @@ struct HatFace { next_id: AtomicU32, // @TODO: manage rollover and uniqueness local_subs: HashMap, SubscriberId>, remote_subs: HashMap>, - local_qabls: HashMap, (QueryableId, QueryableInfo)>, + local_qabls: HashMap, (QueryableId, QueryableInfoType)>, remote_qabls: HashMap>, } diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index caea6fe6b8..38f77bec45 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -32,29 +32,24 @@ use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ core::{WhatAmI, WireExpr}, network::declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, + common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, DeclareQueryable, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; -#[cfg(feature = "complete_n")] #[inline] -fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { - this.complete += info.complete; +fn merge_qabl_infos(mut this: QueryableInfoType, info: &QueryableInfoType) -> QueryableInfoType { + this.complete = this.complete || info.complete; this.distance = std::cmp::min(this.distance, info.distance); 
this } -#[cfg(not(feature = "complete_n"))] -#[inline] -fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { - this.complete = u8::from(this.complete != 0 || info.complete != 0); - this.distance = std::cmp::min(this.distance, info.distance); - this -} - -fn local_qabl_info(_tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { +fn local_qabl_info( + _tables: &Tables, + res: &Arc, + face: &Arc, +) -> QueryableInfoType { res.session_ctxs .values() .fold(None, |accu, ctx| { @@ -71,10 +66,7 @@ fn local_qabl_info(_tables: &Tables, res: &Arc, face: &Arc) accu } }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) + .unwrap_or(QueryableInfoType::DEFAULT) } fn propagate_simple_queryable( @@ -121,7 +113,7 @@ fn register_client_queryable( face: &mut Arc, id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, ) { // Register queryable { @@ -147,7 +139,7 @@ fn declare_client_queryable( face: &mut Arc, id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, ) { register_client_queryable(tables, face, id, res, qabl_info); propagate_simple_queryable(tables, res, Some(face)); @@ -263,7 +255,7 @@ impl HatQueriesTrait for HatCode { face: &mut Arc, id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, _node_id: NodeId, ) { declare_client_queryable(tables, face, id, res, qabl_info); diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 47cf02db46..27db136eda 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -56,7 +56,7 @@ use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, Ze use zenoh_protocol::{ common::ZExtBody, network::{ - declare::{queryable::ext::QueryableInfo, QueryableId, SubscriberId}, + declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, oam::id::OAM_LINKSTATE, Oam, }, @@ -748,8 +748,8 @@ impl HatBaseTrait for HatCode { struct HatContext { router_subs: HashSet, peer_subs: HashSet, - router_qabls: HashMap, - peer_qabls: HashMap, + router_qabls: HashMap, + peer_qabls: HashMap, } impl HatContext { @@ -768,7 +768,7 @@ struct HatFace { next_id: AtomicU32, // @TODO: manage rollover and uniqueness local_subs: HashMap, SubscriberId>, remote_subs: HashMap>, - local_qabls: HashMap, (QueryableId, QueryableInfo)>, + local_qabls: HashMap, (QueryableId, QueryableInfoType)>, remote_qabls: HashMap>, } diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index aca6f71b3e..61abaa7c55 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -35,29 +35,20 @@ use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ core::{WhatAmI, WireExpr, ZenohId}, network::declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, + common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, DeclareQueryable, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; -#[cfg(feature = "complete_n")] #[inline] -fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { - this.complete += info.complete; +fn merge_qabl_infos(mut this: QueryableInfoType, info: &QueryableInfoType) -> QueryableInfoType { + this.complete = this.complete || info.complete; this.distance = std::cmp::min(this.distance, info.distance); this 
} -#[cfg(not(feature = "complete_n"))] -#[inline] -fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { - this.complete = u8::from(this.complete != 0 || info.complete != 0); - this.distance = std::cmp::min(this.distance, info.distance); - this -} - -fn local_router_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { +fn local_router_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfoType { let info = if hat!(tables).full_net(WhatAmI::Peer) { res.context.as_ref().and_then(|_| { res_hat!(res) @@ -89,13 +80,10 @@ fn local_router_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo accu } }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) + .unwrap_or(QueryableInfoType::DEFAULT) } -fn local_peer_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { +fn local_peer_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfoType { let info = if res.context.is_some() { res_hat!(res) .router_qabls @@ -125,13 +113,14 @@ fn local_peer_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { accu } }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) + .unwrap_or(QueryableInfoType::DEFAULT) } -fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { +fn local_qabl_info( + tables: &Tables, + res: &Arc, + face: &Arc, +) -> QueryableInfoType { let mut info = if res.context.is_some() { res_hat!(res) .router_qabls @@ -183,10 +172,7 @@ fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) accu } }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) + .unwrap_or(QueryableInfoType::DEFAULT) } #[inline] @@ -195,7 +181,7 @@ fn send_sourced_queryable_to_net_childs( net: &Network, childs: &[NodeIndex], res: &Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, src_face: Option<&mut Arc>, routing_context: NodeId, ) { @@ -279,7 +265,7 @@ fn propagate_simple_queryable( fn propagate_sourced_queryable( tables: &Tables, res: &Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, src_face: Option<&mut Arc>, source: &ZenohId, net_type: WhatAmI, @@ -318,7 +304,7 @@ fn register_router_queryable( tables: &mut Tables, mut face: Option<&mut Arc>, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, router: ZenohId, ) { let current_info = res_hat!(res).router_qabls.get(&router); @@ -356,7 +342,7 @@ fn declare_router_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, router: ZenohId, ) { register_router_queryable(tables, Some(face), res, qabl_info, router); @@ -366,7 +352,7 @@ fn register_peer_queryable( tables: &mut Tables, face: Option<&mut Arc>, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, peer: ZenohId, ) { let current_info = res_hat!(res).peer_qabls.get(&peer); @@ -386,7 +372,7 @@ fn declare_peer_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, peer: ZenohId, ) { let mut face = Some(face); @@ -401,7 +387,7 @@ fn register_client_queryable( face: &mut Arc, id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, ) { // Register queryable { @@ -427,7 +413,7 @@ fn declare_client_queryable( face: &mut Arc, id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, ) { register_client_queryable(tables, face, id, res, qabl_info); let local_details = local_router_qabl_info(tables, res); @@ -975,7 +961,7 @@ fn 
insert_target_for_qabls( tables: &Tables, net: &Network, source: NodeId, - qabls: &HashMap, + qabls: &HashMap, complete: bool, ) { if net.trees.len() > source as usize { @@ -1021,7 +1007,7 @@ impl HatQueriesTrait for HatCode { face: &mut Arc, id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, node_id: NodeId, ) { match face.whatami { diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 29106cb89d..343199e367 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -39,7 +39,7 @@ use zenoh_protocol::{ ExprId, WireExpr, ZenohId, EMPTY_EXPR_ID, }, network::{ - declare::{queryable::ext::QueryableInfo, subscriber::ext::SubscriberInfo}, + declare::{queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo}, ext, Declare, DeclareBody, DeclareQueryable, DeclareSubscriber, Push, Request, Response, ResponseFinal, }, @@ -283,10 +283,7 @@ impl AdminSpace { body: DeclareBody::DeclareQueryable(DeclareQueryable { id: runtime.next_id(), wire_expr: [&root_key, "/**"].concat().into(), - ext_info: QueryableInfo { - complete: 0, - distance: 0, - }, + ext_info: QueryableInfoType::DEFAULT, }), }); diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 496c6879ce..58d315c848 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -71,7 +71,7 @@ use zenoh_protocol::{ }, network::{ declare::{ - self, common::ext::WireExprType, queryable::ext::QueryableInfo, + self, common::ext::WireExprType, queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, DeclareQueryable, DeclareSubscriber, UndeclareQueryable, UndeclareSubscriber, }, @@ -1177,8 +1177,8 @@ impl Session { if origin != Locality::SessionLocal { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); - let qabl_info = QueryableInfo { - complete: if complete { 1 } else { 0 }, + let qabl_info = QueryableInfoType { + complete, distance: 0, }; primitives.send_declare(Declare { diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index b0d29ea89b..af7ec3bf43 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -371,7 +371,6 @@ fn test_default_features() { concat!( " zenoh/auth_pubkey", " zenoh/auth_usrpwd", - // " zenoh/complete_n", // " zenoh/shared-memory", // " zenoh/stats", " zenoh/transport_multilink", @@ -397,7 +396,6 @@ fn test_no_default_features() { concat!( // " zenoh/auth_pubkey", // " zenoh/auth_usrpwd", - // " zenoh/complete_n", // " zenoh/shared-memory", // " zenoh/stats", // " zenoh/transport_multilink", From 8cd60d0afaeec5ab0468e899db300302f65c62e6 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 27 Mar 2024 00:47:48 +0100 Subject: [PATCH 052/357] Publication updated --- examples/examples/z_pub.rs | 1 + zenoh/src/publication.rs | 254 +++++++++++++++++++++++++------------ zenoh/src/queryable.rs | 13 +- zenoh/src/sample.rs | 18 ++- zenoh/src/session.rs | 5 +- 5 files changed, 196 insertions(+), 95 deletions(-) diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 7ba17745b5..d22d4d55ee 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -16,6 +16,7 @@ use clap::Parser; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; +use zenoh::sample_builder::SampleBuilderTrait; use zenoh_examples::CommonArgs; #[async_std::main] diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 97f485f1e3..1e1c0cb509 100644 --- a/zenoh/src/publication.rs +++ 
b/zenoh/src/publication.rs @@ -17,7 +17,7 @@ use crate::net::primitives::Primitives; use crate::prelude::*; #[zenoh_macros::unstable] use crate::sample::Attachment; -use crate::sample::{DataInfo, QoS, Sample, SampleKind}; +use crate::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; use crate::sample_builder::{ DeleteSampleBuilderTrait, PutSampleBuilderTrait, QoSBuilderTrait, SampleBuilderTrait, }; @@ -33,8 +33,6 @@ use zenoh_core::{zread, AsyncResolve, Resolvable, Resolve, SyncResolve}; use zenoh_protocol::network::push::ext; use zenoh_protocol::network::Mapping; use zenoh_protocol::network::Push; -#[zenoh_macros::unstable] -use zenoh_protocol::zenoh::ext::SourceInfoType; use zenoh_protocol::zenoh::Del; use zenoh_protocol::zenoh::PushBody; use zenoh_protocol::zenoh::Put; @@ -63,7 +61,7 @@ pub struct DeleteBuilder<'a, 'b> { pub(crate) publisher: PublisherBuilder<'a, 'b>, pub(crate) timestamp: Option, #[cfg(feature = "unstable")] - pub(crate) source_info: Option, + pub(crate) source_info: SourceInfo, #[cfg(feature = "unstable")] pub(crate) attachment: Option, } @@ -94,7 +92,7 @@ pub struct PutBuilder<'a, 'b> { pub(crate) encoding: Encoding, pub(crate) timestamp: Option, #[cfg(feature = "unstable")] - pub(crate) source_info: Option, + pub(crate) source_info: SourceInfo, #[cfg(feature = "unstable")] pub(crate) attachment: Option, } @@ -160,7 +158,7 @@ impl SampleBuilderTrait for PutBuilder<'_, '_> { #[cfg(feature = "unstable")] fn with_source_info(self, source_info: SourceInfo) -> Self { Self { - source_info: Some(source_info), + source_info, ..self } } @@ -190,7 +188,7 @@ impl SampleBuilderTrait for DeleteBuilder<'_, '_> { #[cfg(feature = "unstable")] fn with_source_info(self, source_info: SourceInfo) -> Self { Self { - source_info: Some(source_info), + source_info, ..self } } @@ -258,7 +256,7 @@ impl Resolvable for DeleteBuilder<'_, '_> { impl SyncResolve for PutBuilder<'_, '_> { #[inline] fn res_sync(self) -> ::To { - let publisher = self.publisher.one_time_res_sync()?; + let publisher = self.publisher.create_one_shot_publisher()?; resolve_put( &publisher, self.payload, @@ -276,7 +274,7 @@ impl SyncResolve for PutBuilder<'_, '_> { impl SyncResolve for DeleteBuilder<'_, '_> { #[inline] fn res_sync(self) -> ::To { - let publisher = self.publisher.one_time_res_sync()?; + let publisher = self.publisher.create_one_shot_publisher()?; resolve_put( &publisher, Payload::empty(), @@ -467,20 +465,6 @@ impl<'a> Publisher<'a> { std::sync::Arc::new(self) } - fn _write(&self, kind: SampleKind, payload: Payload) -> Publication { - Publication { - publisher: self, - payload, - kind, - encoding: Encoding::ZENOH_BYTES, - timestamp: None, - #[cfg(feature = "unstable")] - source_info: None, - #[cfg(feature = "unstable")] - attachment: None, - } - } - /// Put data. /// /// # Examples @@ -494,11 +478,20 @@ impl<'a> Publisher<'a> { /// # }) /// ``` #[inline] - pub fn put(&self, payload: IntoPayload) -> Publication + pub fn put(&self, payload: IntoPayload) -> PutPublication where IntoPayload: Into, { - self._write(SampleKind::Put, payload.into()) + PutPublication { + publisher: self, + payload: payload.into(), + encoding: Encoding::ZENOH_BYTES, + timestamp: None, + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment: None, + } } /// Delete data. 
@@ -513,8 +506,15 @@ impl<'a> Publisher<'a> { /// publisher.delete().res().await.unwrap(); /// # }) /// ``` - pub fn delete(&self) -> Publication { - self._write(SampleKind::Delete, Payload::empty()) + pub fn delete(&self) -> DeletePublication { + DeletePublication { + publisher: self, + timestamp: None, + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment: None, + } } /// Return the [`MatchingStatus`] of the publisher. @@ -737,64 +737,129 @@ impl Drop for Publisher<'_> { } /// A [`Resolvable`] returned by [`Publisher::put()`](Publisher::put), -/// [`Publisher::delete()`](Publisher::delete) and [`Publisher::write()`](Publisher::write). #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -pub struct Publication<'a> { +pub struct PutPublication<'a> { publisher: &'a Publisher<'a>, payload: Payload, - kind: SampleKind, encoding: Encoding, timestamp: Option, #[cfg(feature = "unstable")] - pub(crate) source_info: Option, + pub(crate) source_info: SourceInfo, #[cfg(feature = "unstable")] pub(crate) attachment: Option, } -impl<'a> Publication<'a> { - pub fn with_encoding(mut self, encoding: Encoding) -> Self { - self.encoding = encoding; - self +/// A [`Resolvable`] returned by [`Publisher::delete()`](Publisher::delete) +#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +pub struct DeletePublication<'a> { + publisher: &'a Publisher<'a>, + timestamp: Option, + #[cfg(feature = "unstable")] + pub(crate) source_info: SourceInfo, + #[cfg(feature = "unstable")] + pub(crate) attachment: Option, +} + +impl SampleBuilderTrait for PutPublication<'_> { + fn with_timestamp_opt(self, timestamp: Option) -> Self { + Self { timestamp, ..self } } - #[zenoh_macros::unstable] - pub fn with_attachment(mut self, attachment: Attachment) -> Self { - self.attachment = Some(attachment); - self + fn with_timestamp(self, timestamp: uhlc::Timestamp) -> Self { + Self { + timestamp: Some(timestamp), + ..self + } } - /// Send data with the given [`SourceInfo`]. 
- /// - /// # Examples - /// ``` - /// # async_std::task::block_on(async { - /// use zenoh::prelude::r#async::*; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); - /// publisher.put("Value").with_source_info(SourceInfo { - /// source_id: Some(publisher.id()), - /// source_sn: Some(0), - /// }).res().await.unwrap(); - /// # }) - /// ``` - #[zenoh_macros::unstable] - pub fn with_source_info(mut self, source_info: SourceInfo) -> Self { - self.source_info = Some(source_info); - self + #[cfg(feature = "unstable")] + fn with_source_info(self, source_info: SourceInfo) -> Self { + Self { + source_info, + ..self + } + } + + #[cfg(feature = "unstable")] + fn with_attachment_opt(self, attachment: Option) -> Self { + Self { attachment, ..self } + } + + #[cfg(feature = "unstable")] + fn with_attachment(self, attachment: Attachment) -> Self { + Self { + attachment: Some(attachment), + ..self + } + } +} + +impl PutSampleBuilderTrait for PutPublication<'_> { + fn with_encoding(self, encoding: Encoding) -> Self { + Self { encoding, ..self } } + + fn with_payload(self, payload: IntoPayload) -> Self + where + IntoPayload: Into, + { + Self { + payload: payload.into(), + ..self + } + } +} + +impl SampleBuilderTrait for DeletePublication<'_> { + fn with_timestamp_opt(self, timestamp: Option) -> Self { + Self { timestamp, ..self } + } + + fn with_timestamp(self, timestamp: uhlc::Timestamp) -> Self { + Self { + timestamp: Some(timestamp), + ..self + } + } + + #[cfg(feature = "unstable")] + fn with_source_info(self, source_info: SourceInfo) -> Self { + Self { + source_info, + ..self + } + } + + #[cfg(feature = "unstable")] + fn with_attachment_opt(self, attachment: Option) -> Self { + Self { attachment, ..self } + } + + #[cfg(feature = "unstable")] + fn with_attachment(self, attachment: Attachment) -> Self { + Self { + attachment: Some(attachment), + ..self + } + } +} + +impl DeleteSampleBuilderTrait for DeletePublication<'_> {} + +impl Resolvable for PutPublication<'_> { + type To = ZResult<()>; } -impl Resolvable for Publication<'_> { +impl Resolvable for DeletePublication<'_> { type To = ZResult<()>; } -impl SyncResolve for Publication<'_> { +impl SyncResolve for PutPublication<'_> { fn res_sync(self) -> ::To { resolve_put( self.publisher, self.payload, - self.kind, + SampleKind::Put, self.encoding, self.timestamp, #[cfg(feature = "unstable")] @@ -805,7 +870,31 @@ impl SyncResolve for Publication<'_> { } } -impl AsyncResolve for Publication<'_> { +impl SyncResolve for DeletePublication<'_> { + fn res_sync(self) -> ::To { + resolve_put( + self.publisher, + Payload::empty(), + SampleKind::Delete, + Encoding::ZENOH_BYTES, + self.timestamp, + #[cfg(feature = "unstable")] + self.source_info, + #[cfg(feature = "unstable")] + self.attachment, + ) + } +} + +impl AsyncResolve for PutPublication<'_> { + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} + +impl AsyncResolve for DeletePublication<'_> { type Future = Ready; fn res_async(self) -> Self::Future { @@ -823,18 +912,25 @@ impl<'a> Sink for Publisher<'a> { #[inline] fn start_send(self: Pin<&mut Self>, item: Sample) -> Result<(), Self::Error> { - Publication { - publisher: &self, - payload: item.payload, - kind: item.kind, - encoding: item.encoding, - timestamp: None, + let SampleFields { + payload, + kind, + encoding, + #[cfg(feature = "unstable")] + attachment, + .. 
+ } = item.into(); + resolve_put( + &self, + payload, + kind, + encoding, + None, #[cfg(feature = "unstable")] - source_info: None, + SourceInfo::empty(), #[cfg(feature = "unstable")] - attachment: item.attachment, - } - .res_sync() + attachment, + ) } #[inline] @@ -928,7 +1024,7 @@ impl<'a, 'b> PublisherBuilder<'a, 'b> { } // internal function for `PutBuilder` and `DeleteBuilder` - fn one_time_res_sync(self) -> ZResult> { + fn create_one_shot_publisher(self) -> ZResult> { Ok(Publisher { session: self.session, #[cfg(feature = "unstable")] @@ -1013,7 +1109,7 @@ fn resolve_put( kind: SampleKind, encoding: Encoding, timestamp: Option, - #[cfg(feature = "unstable")] source_info: Option, + #[cfg(feature = "unstable")] source_info: SourceInfo, #[cfg(feature = "unstable")] attachment: Option, ) -> ZResult<()> { log::trace!("write({:?}, [...])", &publisher.key_expr); @@ -1051,10 +1147,7 @@ fn resolve_put( timestamp, encoding: encoding.clone().into(), #[cfg(feature = "unstable")] - ext_sinfo: source_info.map(|s| SourceInfoType { - id: s.source_id.unwrap_or_default(), - sn: s.source_sn.unwrap_or_default() as u32, - }), + ext_sinfo: source_info.into(), #[cfg(not(feature = "unstable"))] ext_sinfo: None, #[cfg(feature = "shared-memory")] @@ -1076,10 +1169,7 @@ fn resolve_put( PushBody::Del(Del { timestamp, #[cfg(feature = "unstable")] - ext_sinfo: source_info.map(|s| SourceInfoType { - id: s.source_id.unwrap_or_default(), - sn: s.source_sn.unwrap_or_default() as u32, - }), + ext_sinfo: source_info.into(), #[cfg(not(feature = "unstable"))] ext_sinfo: None, ext_attachment, diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 9edb9fb26c..6f71cd7fb7 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -518,17 +518,10 @@ impl Query { { bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", sample.key_expr, self.key_expr()) } - #[allow(unused_mut)] // will be unused if feature = "unstable" is not enabled - let mut ext_sinfo = None; + #[cfg(not(feature = "unstable"))] + let ext_sinfo = None; #[cfg(feature = "unstable")] - { - if sample.source_info.source_id.is_some() || sample.source_info.source_sn.is_some() { - ext_sinfo = Some(zenoh::put::ext::SourceInfoType { - id: sample.source_info.source_id.unwrap_or_default(), - sn: sample.source_info.source_sn.unwrap_or_default() as u32, - }) - } - } + let ext_sinfo = sample.source_info.into(); self.inner.primitives.send_response(Response { rid: self.inner.qid, wire_expr: WireExpr { diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index acf8536a0e..1998f3e844 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -22,7 +22,7 @@ use crate::Priority; use serde::Serialize; use std::{convert::TryFrom, fmt}; use zenoh_protocol::core::EntityGlobalId; -use zenoh_protocol::{core::CongestionControl, network::push::ext::QoSType}; +use zenoh_protocol::{core::CongestionControl, network::push::ext::QoSType, zenoh}; pub type SourceSn = u64; @@ -163,6 +163,22 @@ impl SourceInfo { source_sn: None, } } + pub(crate) fn is_empty(&self) -> bool { + self.source_id.is_none() && self.source_sn.is_none() + } +} + +impl From for Option { + fn from(source_info: SourceInfo) -> Option { + if source_info.is_empty() { + None + } else { + Some(zenoh::put::ext::SourceInfoType { + id: source_info.source_id.unwrap_or_default(), + sn: source_info.source_sn.unwrap_or_default() as u32, + }) + } + } } #[zenoh_macros::unstable] diff --git a/zenoh/src/session.rs 
b/zenoh/src/session.rs index db81888018..a2371d1bfa 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -41,6 +41,7 @@ use crate::Priority; use crate::Sample; use crate::SampleKind; use crate::Selector; +use crate::SourceInfo; use crate::Value; use async_std::task; use log::{error, trace, warn}; @@ -711,7 +712,7 @@ impl Session { #[cfg(feature = "unstable")] attachment: None, #[cfg(feature = "unstable")] - source_info: None, + source_info: SourceInfo::empty(), } } @@ -745,7 +746,7 @@ impl Session { #[cfg(feature = "unstable")] attachment: None, #[cfg(feature = "unstable")] - source_info: None, + source_info: SourceInfo::empty(), } } /// Query data from the matching queryables in the system. From 00e0a59a71804fa54e4e2cc6d92a35731a079654 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 27 Mar 2024 00:59:12 +0100 Subject: [PATCH 053/357] build fix --- examples/examples/z_pub_shm_thr.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index 7c6f3cbbd3..5230ea3ce6 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -15,6 +15,7 @@ use clap::Parser; use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; +use zenoh::sample_builder::QoSBuilderTrait; use zenoh::shm::SharedMemoryManager; use zenoh_examples::CommonArgs; From e601271c25becb47c0f14fbbf0dccce2dfdb81f5 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 27 Mar 2024 02:26:16 +0100 Subject: [PATCH 054/357] reply_sample restored --- plugins/zenoh-plugin-example/src/lib.rs | 13 +----- .../src/replica/storage.rs | 42 +++++++++++-------- zenoh-ext/src/publication_cache.rs | 33 +++------------ zenoh-ext/src/querying_subscriber.rs | 9 +--- zenoh/src/queryable.rs | 38 ++++++++++++----- zenoh/src/sample_builder.rs | 6 +-- 6 files changed, 64 insertions(+), 77 deletions(-) diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 40f8d69488..04f49b4739 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -24,8 +24,6 @@ use std::sync::{ use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::runtime::Runtime; -use zenoh::sample::SampleFields; -use zenoh::sample_builder::SampleBuilderTrait; use zenoh_core::zlock; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; use zenoh_result::{bail, ZResult}; @@ -176,16 +174,7 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { info!("Handling query '{}'", query.selector()); for (key_expr, sample) in stored.iter() { if query.selector().key_expr.intersects(unsafe{keyexpr::from_str_unchecked(key_expr)}) { - let SampleFields { key_expr, timestamp, attachment, source_info, payload, kind, .. 
} = sample.clone().into(); - let reply = query - .reply_sample(key_expr) - .with_timestamp_opt(timestamp) - .with_attachment_opt(attachment) - .with_source_info(source_info); - match kind { - SampleKind::Put => reply.put(payload).res().await.unwrap(), - SampleKind::Delete => reply.delete().res().await.unwrap(), - } + query.reply_sample(sample.clone()).res().await.unwrap(); } } } diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 67ce871bb0..aed13bbbf1 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -21,14 +21,17 @@ use futures::select; use std::collections::{HashMap, HashSet}; use std::str::{self, FromStr}; use std::time::{SystemTime, UNIX_EPOCH}; +use zenoh::buffers::buffer::SplitBuffer; use zenoh::buffers::ZBuf; -use zenoh::prelude::r#async::*; -use zenoh::query::ConsolidationMode; +use zenoh::key_expr::KeyExpr; +use zenoh::query::{ConsolidationMode, QueryTarget}; +use zenoh::sample::{Sample, SampleKind}; use zenoh::sample_builder::{ - PutSampleBuilder, PutSampleBuilderTrait, SampleBuilder, SampleBuilderTrait, + DeleteSampleBuilder, PutSampleBuilder, PutSampleBuilderTrait, SampleBuilder, SampleBuilderTrait, }; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; -use zenoh::{Result as ZResult, Session}; +use zenoh::value::Value; +use zenoh::{Result as ZResult, Session, SessionDeclarations}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; use zenoh_core::{AsyncResolve, SyncResolve}; @@ -235,11 +238,8 @@ impl StorageService { continue; } }; - let sample = if sample.timestamp().is_none() { - SampleBuilder::from(sample).with_timestamp(new_reception_timestamp()).res_sync() - } else { - sample - }; + let timestamp = sample.timestamp().cloned().unwrap_or(new_reception_timestamp()); + let sample = SampleBuilder::from(sample).with_timestamp(timestamp).res_sync(); self.process_sample(sample).await; }, // on query on key_expr @@ -307,21 +307,27 @@ impl StorageService { .ovderriding_wild_update(&k, sample.timestamp().unwrap()) .await { - Some(overriding_update) => { + Some(Update { + kind: SampleKind::Put, + data, + }) => { let Value { payload, encoding, .. 
- } = overriding_update.data.value; + } = data.value; PutSampleBuilder::new(KeyExpr::from(k.clone()), payload) .with_encoding(encoding) - .with_timestamp(overriding_update.data.timestamp) - .res_sync() - } - None => { - PutSampleBuilder::new(KeyExpr::from(k.clone()), sample.payload().clone()) - .with_encoding(sample.encoding().clone()) - .with_timestamp(*sample.timestamp().unwrap()) + .with_timestamp(data.timestamp) .res_sync() } + Some(Update { + kind: SampleKind::Delete, + data, + }) => DeleteSampleBuilder::new(KeyExpr::from(k.clone())) + .with_timestamp(data.timestamp) + .res_sync(), + None => SampleBuilder::from(sample.clone()) + .keyexpr(k.clone()) + .res_sync(), }; let stripped_key = match self.strip_prefix(sample_to_store.key_expr()) { diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 8a782a179e..85cb96cce2 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -20,8 +20,6 @@ use std::convert::TryInto; use std::future::Ready; use zenoh::prelude::r#async::*; use zenoh::queryable::{Query, Queryable}; -use zenoh::sample::SampleFields; -use zenoh::sample_builder::SampleBuilderTrait; use zenoh::subscriber::FlumeSubscriber; use zenoh::SessionRef; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; @@ -118,29 +116,6 @@ pub struct PublicationCache<'a> { _stoptx: Sender, } -async fn reply_sample(query: &Query, sample: &Sample) { - let SampleFields { - key_expr, - timestamp, - attachment, - source_info, - payload, - kind, - .. - } = sample.clone().into(); - let reply = query - .reply_sample(key_expr) - .with_timestamp_opt(timestamp) - .with_attachment_opt(attachment) - .with_source_info(source_info); - if let Err(e) = match kind { - SampleKind::Put => reply.put(payload).res_async().await, - SampleKind::Delete => reply.delete().res_async().await, - } { - log::warn!("Error replying to query: {}", e); - } -} - impl<'a> PublicationCache<'a> { fn new(conf: PublicationCacheBuilder<'a, '_, '_>) -> ZResult> { let key_expr = conf.pub_key_expr?; @@ -237,7 +212,9 @@ impl<'a> PublicationCache<'a> { continue; } } - reply_sample(&query, sample).await; + if let Err(e) = query.reply_sample(sample.clone()).res_async().await { + log::warn!("Error replying to query: {}", e); + } } } } else { @@ -249,7 +226,9 @@ impl<'a> PublicationCache<'a> { continue; } } - reply_sample(&query, sample).await; + if let Err(e) = query.reply_sample(sample.clone()).res_async().await { + log::warn!("Error replying to query: {}", e); + } } } } diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index eb6d6e9516..5c302840b8 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -664,13 +664,8 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { log::trace!("Sample received while fetch in progress: push it to merge_queue"); // ensure the sample has a timestamp, thus it will always be sorted into the MergeQueue // after any timestamped Sample possibly coming from a fetch reply. 
- let s = if s.timestamp().is_none() { - SampleBuilder::from(s) - .with_timestamp(new_reception_timestamp()) - .res_sync() - } else { - s - }; + let timestamp = s.timestamp().cloned().unwrap_or(new_reception_timestamp()); + let s = SampleBuilder::from(s).with_timestamp(timestamp).res_sync(); state.merge_queue.push(s); } } diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 6f71cd7fb7..14e9d09068 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -106,20 +106,19 @@ impl Query { self.inner.attachment.as_ref() } - /// Sends a reply or delete reply to this Query + /// Sends a reply in the form of [`Sample`] to this Query. /// - /// This function is useful when resending the samples which can be of [`SampleKind::Put`] or [`SampleKind::Delete`] - /// It allows to build the reply with same common parameters, like timestamp, attachment, source_info, etc. - /// and only on final step to choose the kind of reply by calling [`ReplySampleBuilder::put`] or [`ReplySampleBuilder::delete`] methods. + /// By default, queries only accept replies whose key expression intersects with the query's. + /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]), + /// replying on a disjoint key expression will result in an error when resolving the reply. + /// This api is for internal use only. #[inline(always)] - pub fn reply_sample(&self, key_expr: IntoKeyExpr) -> ReplySampleBuilder - where - IntoKeyExpr: Into>, - { - let sample_builder = SampleBuilder::new(key_expr); + #[cfg(feature = "unstable")] + #[doc(hidden)] + pub fn reply_sample(&self, sample: Sample) -> ReplySampleBuilder<'_> { ReplySampleBuilder { query: self, - sample_builder, + sample_builder: sample.into(), } } @@ -302,6 +301,25 @@ impl QoSBuilderTrait for ReplySampleBuilder<'_> { } } +impl Resolvable for ReplySampleBuilder<'_> { + type To = ZResult<()>; +} + +impl SyncResolve for ReplySampleBuilder<'_> { + fn res_sync(self) -> ::To { + let sample = self.sample_builder.res_sync(); + self.query._reply_sample(sample) + } +} + +impl AsyncResolve for ReplySampleBuilder<'_> { + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} + /// A builder returned by [`Query::reply()`](Query::reply) #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index 1710cbc85b..7e38e84afd 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -87,7 +87,7 @@ impl SampleBuilder { }) } /// Allows to change keyexpr of [`Sample`] - pub fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self + pub fn keyexpr(self, key_expr: IntoKeyExpr) -> Self where IntoKeyExpr: Into>, { @@ -189,7 +189,7 @@ impl PutSampleBuilder { where IntoKeyExpr: Into>, { - Self(self.0.with_keyexpr(key_expr)) + Self(self.0.keyexpr(key_expr)) } // It's convenient to set QoS as a whole for internal usage. For user API there are `congestion_control`, `priority` and `express` methods. pub(crate) fn with_qos(self, qos: QoS) -> Self { @@ -283,7 +283,7 @@ impl DeleteSampleBuilder { where IntoKeyExpr: Into>, { - Self(self.0.with_keyexpr(key_expr)) + Self(self.0.keyexpr(key_expr)) } // It's convenient to set QoS as a whole for internal usage. For user API there are `congestion_control`, `priority` and `express` methods. 
pub(crate) fn with_qos(self, qos: QoS) -> Self { From ea4020ddd3bba3402bba7a4fe172cc2518333066 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 27 Mar 2024 11:29:05 +0100 Subject: [PATCH 055/357] build fixes --- zenoh/src/publication.rs | 2 ++ zenoh/src/session.rs | 1 + 2 files changed, 3 insertions(+) diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 1e1c0cb509..8772319593 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -73,6 +73,7 @@ pub struct DeleteBuilder<'a, 'b> { /// # async_std::task::block_on(async { /// use zenoh::prelude::r#async::*; /// use zenoh::publication::CongestionControl; +/// use zenoh::sample_builder::{PutSampleBuilderTrait, QoSBuilderTrait}; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// session @@ -951,6 +952,7 @@ impl<'a> Sink for Publisher<'a> { /// # async_std::task::block_on(async { /// use zenoh::prelude::r#async::*; /// use zenoh::publication::CongestionControl; +/// use zenoh::sample_builder::{PutSampleBuilderTrait, QoSBuilderTrait}; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let publisher = session diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index a2371d1bfa..ffe7036050 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -683,6 +683,7 @@ impl Session { /// ``` /// # async_std::task::block_on(async { /// use zenoh::prelude::r#async::*; + /// use zenoh::sample_builder::PutSampleBuilderTrait; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// session From 5f0b531041ace6a303533ac0fbc56227ba121617 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 27 Mar 2024 12:37:22 +0100 Subject: [PATCH 056/357] clippy warning fix --- commons/zenoh-macros/build.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/commons/zenoh-macros/build.rs b/commons/zenoh-macros/build.rs index 557593d00e..d3c6b4e55b 100644 --- a/commons/zenoh-macros/build.rs +++ b/commons/zenoh-macros/build.rs @@ -24,6 +24,7 @@ fn main() { let mut version_rs = OpenOptions::new() .create(true) .write(true) + .truncate(true) .open(version_rs) .unwrap(); version_rs.write_all(&output.stdout).unwrap(); From 5a9bf0aacbba65295489110f6c6d645b9c50811b Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 27 Mar 2024 12:44:48 +0100 Subject: [PATCH 057/357] Payload API (#866) * Remove Deref/DerefMut to ZBuf from Payload. 
* Use reader in payload deserializer * Remove payload writer * Replace deserialize::() with deserialize::Cow() * Fix cargo clippy * Remove blank lifetime --- commons/zenoh-macros/build.rs | 1 + plugins/zenoh-plugin-example/src/lib.rs | 3 +- plugins/zenoh-plugin-rest/src/lib.rs | 22 +++- .../src/replica/storage.rs | 20 ++- zenoh-ext/src/group.rs | 7 +- zenoh/src/payload.rs | 121 ++++++++++-------- zenoh/tests/attachments.rs | 16 +-- 7 files changed, 108 insertions(+), 82 deletions(-) diff --git a/commons/zenoh-macros/build.rs b/commons/zenoh-macros/build.rs index 557593d00e..d5ce6632dc 100644 --- a/commons/zenoh-macros/build.rs +++ b/commons/zenoh-macros/build.rs @@ -23,6 +23,7 @@ fn main() { let version_rs = std::path::PathBuf::from(env::var_os("OUT_DIR").unwrap()).join("version.rs"); let mut version_rs = OpenOptions::new() .create(true) + .truncate(true) .write(true) .open(version_rs) .unwrap(); diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 04f49b4739..ad254278e3 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -15,6 +15,7 @@ use futures::select; use log::{debug, info}; +use std::borrow::Cow; use std::collections::HashMap; use std::convert::TryFrom; use std::sync::{ @@ -164,7 +165,7 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { // on sample received by the Subscriber sample = sub.recv_async() => { let sample = sample.unwrap(); - let payload = sample.payload().deserialize::().unwrap_or_else(|e| format!("{}", e)); + let payload = sample.payload().deserialize::>().unwrap_or_else(|e| Cow::from(e.to_string())); info!("Received data ('{}': '{}')", sample.key_expr(), payload); stored.insert(sample.key_expr().to_string(), sample); }, diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index e2718f6579..12c0dd6405 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -70,9 +70,11 @@ fn payload_to_json(payload: &Payload, encoding: &Encoding) -> serde_json::Value match encoding { // If it is a JSON try to deserialize as json, if it fails fallback to base64 &Encoding::APPLICATION_JSON | &Encoding::TEXT_JSON | &Encoding::TEXT_JSON5 => { - serde_json::from_slice::(&payload.contiguous()).unwrap_or( - serde_json::Value::String(StringOrBase64::from(payload).into_string()), - ) + payload + .deserialize::() + .unwrap_or_else(|_| { + serde_json::Value::String(StringOrBase64::from(payload).into_string()) + }) } // otherwise convert to JSON string _ => serde_json::Value::String(StringOrBase64::from(payload).into_string()), @@ -124,7 +126,10 @@ fn sample_to_html(sample: Sample) -> String { format!( "
<dt>{}</dt>\n<dd>{}</dd>
\n", sample.key_expr().as_str(), - String::from_utf8_lossy(&sample.payload().contiguous()) + sample + .payload() + .deserialize::>() + .unwrap_or_default() ) } @@ -134,7 +139,7 @@ fn result_to_html(sample: Result) -> String { Err(err) => { format!( "
<dt>ERROR</dt>\n<dd>{}</dd>
\n", - String::from_utf8_lossy(&err.payload.contiguous()) + err.payload.deserialize::>().unwrap_or_default() ) } } @@ -160,12 +165,15 @@ async fn to_raw_response(results: flume::Receiver) -> Response { Ok(sample) => response( StatusCode::Ok, Cow::from(sample.encoding()).as_ref(), - String::from_utf8_lossy(&sample.payload().contiguous()).as_ref(), + &sample + .payload() + .deserialize::>() + .unwrap_or_default(), ), Err(value) => response( StatusCode::Ok, Cow::from(&value.encoding).as_ref(), - String::from_utf8_lossy(&value.payload.contiguous()).as_ref(), + &value.payload.deserialize::>().unwrap_or_default(), ), }, Err(_) => response(StatusCode::Ok, "", ""), diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 35134dfe43..108beaabb2 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -665,11 +665,23 @@ impl StorageService { } fn serialize_update(update: &Update) -> String { + let Update { + kind, + data: + StoredData { + value: Value { + payload, encoding, .. + }, + timestamp, + }, + } = update; + let zbuf: ZBuf = payload.into(); + let result = ( - update.kind.to_string(), - update.data.timestamp.to_string(), - update.data.value.encoding.to_string(), - update.data.value.payload.slices().collect::>(), + kind.to_string(), + timestamp.to_string(), + encoding.to_string(), + zbuf.slices().collect::>(), ); serde_json::to_string_pretty(&result).unwrap() } diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 41007d8b87..ec96a8b373 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -25,6 +25,7 @@ use std::convert::TryInto; use std::ops::Add; use std::sync::Arc; use std::time::{Duration, Instant}; +use zenoh::payload::PayloadReader; use zenoh::prelude::r#async::*; use zenoh::publication::Publisher; use zenoh::query::ConsolidationMode; @@ -248,7 +249,7 @@ async fn net_event_handler(z: Arc, state: Arc) { .await .unwrap(); while let Ok(s) = sub.recv_async().await { - match bincode::deserialize::(&(s.payload().contiguous())) { + match bincode::deserialize_from::(s.payload().reader()) { Ok(evt) => match evt { GroupNetEvent::Join(je) => { log::debug!("Member join: {:?}", &je.member); @@ -307,8 +308,8 @@ async fn net_event_handler(z: Arc, state: Arc) { while let Ok(reply) = receiver.recv_async().await { match reply.sample { Ok(sample) => { - match bincode::deserialize::( - &sample.payload().contiguous(), + match bincode::deserialize_from::( + sample.payload().reader(), ) { Ok(m) => { let mut expiry = Instant::now(); diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index aab8235249..ed2a58145c 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -15,14 +15,12 @@ //! Payload primitives. 
use crate::buffers::ZBuf; use std::{ - borrow::Cow, - convert::Infallible, - fmt::Debug, - ops::{Deref, DerefMut}, - string::FromUtf8Error, - sync::Arc, + borrow::Cow, convert::Infallible, fmt::Debug, ops::Deref, string::FromUtf8Error, sync::Arc, +}; +use zenoh_buffers::buffer::Buffer; +use zenoh_buffers::{ + buffer::SplitBuffer, reader::HasReader, writer::HasWriter, ZBufReader, ZSlice, }; -use zenoh_buffers::{buffer::SplitBuffer, reader::HasReader, writer::HasWriter, ZSlice}; use zenoh_result::ZResult; #[cfg(feature = "shared-memory")] use zenoh_shm::SharedMemoryBuf; @@ -44,19 +42,29 @@ impl Payload { { Self(t.into()) } -} -impl Deref for Payload { - type Target = ZBuf; + /// Returns wether the payload is empty or not. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Returns the length of the payload. + pub fn len(&self) -> usize { + self.0.len() + } - fn deref(&self) -> &Self::Target { - &self.0 + /// Get a [`PayloadReader`] implementing [`std::io::Read`] trait. + pub fn reader(&self) -> PayloadReader<'_> { + PayloadReader(self.0.reader()) } } -impl DerefMut for Payload { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 +/// A reader that implements [`std::io::Read`] trait to read from a [`Payload`]. +pub struct PayloadReader<'a>(ZBufReader<'a>); + +impl std::io::Read for PayloadReader<'_> { + fn read(&mut self, buf: &mut [u8]) -> std::io::Result { + self.0.read(buf) } } @@ -81,10 +89,10 @@ impl Payload { /// Decode an object of type `T` from a [`Value`] using the [`ZSerde`]. /// See [encode](Value::encode) for an example. - pub fn deserialize(&self) -> ZResult + pub fn deserialize<'a, T>(&'a self) -> ZResult where - ZSerde: Deserialize, - >::Error: Debug, + ZSerde: Deserialize<'a, T>, + >::Error: Debug, { let t: T = ZSerde.deserialize(self).map_err(|e| zerror!("{:?}", e))?; Ok(t) @@ -99,11 +107,11 @@ pub trait Serialize { fn serialize(self, t: T) -> Self::Output; } -pub trait Deserialize { +pub trait Deserialize<'a, T> { type Error; /// The implementer should take care of deserializing the type `T` based on the [`Encoding`] information. - fn deserialize(self, t: &Payload) -> Result; + fn deserialize(self, t: &'a Payload) -> Result; } /// The default serializer for Zenoh payload. It supports primitives types, such as: vec, int, uint, float, string, bool. 
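// A minimal usage sketch of the Payload API that this patch converges on; it is an
// illustrative example, not part of the upstream diff. It assumes a received
// `zenoh::sample::Sample` and uses only methods visible in these hunks:
// `deserialize`, `len`, and `reader`.

use std::borrow::Cow;
use std::io::Read;
use zenoh::sample::Sample;

fn inspect(sample: &Sample) {
    // Borrow the payload as UTF-8 text when possible, falling back to an owned copy.
    let text = sample
        .payload()
        .deserialize::<Cow<str>>()
        .unwrap_or_default();
    println!("{} bytes: {}", sample.payload().len(), text);

    // Or stream the raw bytes through the `std::io::Read` adapter returned by `reader()`.
    let mut raw = Vec::new();
    let _ = sample.payload().reader().read_to_end(&mut raw);
}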
@@ -129,7 +137,7 @@ impl From for ZBuf { } } -impl Deserialize for ZSerde { +impl Deserialize<'_, ZBuf> for ZSerde { type Error = Infallible; fn deserialize(self, v: &Payload) -> Result { @@ -159,18 +167,17 @@ impl Serialize<&[u8]> for ZSerde { } } -impl Deserialize> for ZSerde { +impl Deserialize<'_, Vec> for ZSerde { type Error = Infallible; fn deserialize(self, v: &Payload) -> Result, Self::Error> { - let v: ZBuf = v.into(); - Ok(v.contiguous().to_vec()) + Ok(Vec::from(v)) } } impl From<&Payload> for Vec { fn from(value: &Payload) -> Self { - value.contiguous().to_vec() + Cow::from(value).to_vec() } } @@ -182,18 +189,17 @@ impl<'a> Serialize> for ZSerde { } } -impl<'a> Deserialize> for ZSerde { +impl<'a> Deserialize<'a, Cow<'a, [u8]>> for ZSerde { type Error = Infallible; - fn deserialize(self, v: &Payload) -> Result, Self::Error> { - let v: Vec = Self.deserialize(v)?; - Ok(Cow::Owned(v)) + fn deserialize(self, v: &'a Payload) -> Result, Self::Error> { + Ok(Cow::from(v)) } } impl<'a> From<&'a Payload> for Cow<'a, [u8]> { fn from(value: &'a Payload) -> Self { - value.contiguous() + value.0.contiguous() } } @@ -214,11 +220,11 @@ impl Serialize<&str> for ZSerde { } } -impl Deserialize for ZSerde { +impl Deserialize<'_, String> for ZSerde { type Error = FromUtf8Error; fn deserialize(self, v: &Payload) -> Result { - String::from_utf8(v.contiguous().to_vec()) + String::from_utf8(Vec::from(v)) } } @@ -246,7 +252,7 @@ impl<'a> Serialize> for ZSerde { } } -impl<'a> Deserialize> for ZSerde { +impl<'a> Deserialize<'a, Cow<'a, str>> for ZSerde { type Error = FromUtf8Error; fn deserialize(self, v: &Payload) -> Result, Self::Error> { @@ -255,10 +261,10 @@ impl<'a> Deserialize> for ZSerde { } } -impl TryFrom<&Payload> for Cow<'_, str> { +impl<'a> TryFrom<&'a Payload> for Cow<'a, str> { type Error = FromUtf8Error; - fn try_from(value: &Payload) -> Result { + fn try_from(value: &'a Payload) -> Result { ZSerde.deserialize(value) } } @@ -295,16 +301,19 @@ macro_rules! 
impl_int { } } - impl Deserialize<$t> for ZSerde { + impl<'a> Deserialize<'a, $t> for ZSerde { type Error = ZDeserializeError; fn deserialize(self, v: &Payload) -> Result<$t, Self::Error> { - let p = v.contiguous(); + use std::io::Read; + + let mut r = v.reader(); let mut bs = (0 as $t).to_le_bytes(); - if p.len() > bs.len() { + if v.len() > bs.len() { return Err(ZDeserializeError); } - bs[..p.len()].copy_from_slice(&p); + r.read_exact(&mut bs[..v.len()]) + .map_err(|_| ZDeserializeError)?; let t = <$t>::from_le_bytes(bs); Ok(t) } @@ -349,15 +358,12 @@ impl Serialize for ZSerde { } } -impl Deserialize for ZSerde { +impl Deserialize<'_, bool> for ZSerde { type Error = ZDeserializeError; fn deserialize(self, v: &Payload) -> Result { - let p = v.contiguous(); - if p.len() != 1 { - return Err(ZDeserializeError); - } - match p[0] { + let p = v.deserialize::().map_err(|_| ZDeserializeError)?; + match p { 0 => Ok(false), 1 => Ok(true), _ => Err(ZDeserializeError), @@ -380,7 +386,7 @@ impl Serialize<&serde_json::Value> for ZSerde { fn serialize(self, t: &serde_json::Value) -> Self::Output { let mut payload = Payload::empty(); - serde_json::to_writer(payload.writer(), t)?; + serde_json::to_writer(payload.0.writer(), t)?; Ok(payload) } } @@ -393,7 +399,7 @@ impl Serialize for ZSerde { } } -impl Deserialize for ZSerde { +impl Deserialize<'_, serde_json::Value> for ZSerde { type Error = serde_json::Error; fn deserialize(self, v: &Payload) -> Result { @@ -415,7 +421,7 @@ impl Serialize<&serde_yaml::Value> for ZSerde { fn serialize(self, t: &serde_yaml::Value) -> Self::Output { let mut payload = Payload::empty(); - serde_yaml::to_writer(payload.writer(), t)?; + serde_yaml::to_writer(payload.0.writer(), t)?; Ok(payload) } } @@ -428,7 +434,7 @@ impl Serialize for ZSerde { } } -impl Deserialize for ZSerde { +impl Deserialize<'_, serde_yaml::Value> for ZSerde { type Error = serde_yaml::Error; fn deserialize(self, v: &Payload) -> Result { @@ -450,7 +456,7 @@ impl Serialize<&serde_cbor::Value> for ZSerde { fn serialize(self, t: &serde_cbor::Value) -> Self::Output { let mut payload = Payload::empty(); - serde_cbor::to_writer(payload.writer(), t)?; + serde_cbor::to_writer(payload.0.writer(), t)?; Ok(payload) } } @@ -463,7 +469,7 @@ impl Serialize for ZSerde { } } -impl Deserialize for ZSerde { +impl Deserialize<'_, serde_cbor::Value> for ZSerde { type Error = serde_cbor::Error; fn deserialize(self, v: &Payload) -> Result { @@ -486,7 +492,7 @@ impl Serialize<&serde_pickle::Value> for ZSerde { fn serialize(self, t: &serde_pickle::Value) -> Self::Output { let mut payload = Payload::empty(); serde_pickle::value_to_writer( - &mut payload.writer(), + &mut payload.0.writer(), t, serde_pickle::SerOptions::default(), )?; @@ -502,7 +508,7 @@ impl Serialize for ZSerde { } } -impl Deserialize for ZSerde { +impl Deserialize<'_, serde_pickle::Value> for ZSerde { type Error = serde_pickle::Error; fn deserialize(self, v: &Payload) -> Result { @@ -590,9 +596,12 @@ impl std::fmt::Display for StringOrBase64 { impl From<&Payload> for StringOrBase64 { fn from(v: &Payload) -> Self { use base64::{engine::general_purpose::STANDARD as b64_std_engine, Engine}; - match v.deserialize::() { - Ok(s) => StringOrBase64::String(s), - Err(_) => StringOrBase64::Base64(b64_std_engine.encode(v.contiguous())), + match v.deserialize::>() { + Ok(s) => StringOrBase64::String(s.into_owned()), + Err(_) => { + let cow: Cow<'_, [u8]> = Cow::from(v); + StringOrBase64::Base64(b64_std_engine.encode(cow)) + } } } } diff --git a/zenoh/tests/attachments.rs 
b/zenoh/tests/attachments.rs index 603939bc0e..e6a3356559 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -20,10 +20,7 @@ fn pubsub() { let _sub = zenoh .declare_subscriber("test/attachment") .callback(|sample| { - println!( - "{}", - std::str::from_utf8(&sample.payload().contiguous()).unwrap() - ); + println!("{}", sample.payload().deserialize::().unwrap()); for (k, v) in sample.attachment().unwrap() { assert!(k.iter().rev().zip(v.as_slice()).all(|(k, v)| k == v)) } @@ -72,13 +69,10 @@ fn queries() { .callback(|query| { println!( "{}", - std::str::from_utf8( - &query - .value() - .map(|q| q.payload.contiguous()) - .unwrap_or_default() - ) - .unwrap() + query + .value() + .map(|q| q.payload.deserialize::().unwrap()) + .unwrap_or_default() ); let mut attachment = Attachment::new(); for (k, v) in query.attachment().unwrap() { From 2be4fa90ada9eff64827ef24da3ded1de919f7fc Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 27 Mar 2024 12:45:42 +0100 Subject: [PATCH 058/357] clippy warning fix (#867) --- commons/zenoh-macros/build.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/commons/zenoh-macros/build.rs b/commons/zenoh-macros/build.rs index d5ce6632dc..be5abe870b 100644 --- a/commons/zenoh-macros/build.rs +++ b/commons/zenoh-macros/build.rs @@ -25,6 +25,7 @@ fn main() { .create(true) .truncate(true) .write(true) + .truncate(true) .open(version_rs) .unwrap(); version_rs.write_all(&output.stdout).unwrap(); From 448535495d2b901c673e9d839908f646700f9719 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 27 Mar 2024 13:13:34 +0100 Subject: [PATCH 059/357] removed extra truncate appeared from different PRs --- commons/zenoh-macros/build.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/commons/zenoh-macros/build.rs b/commons/zenoh-macros/build.rs index be5abe870b..d5ce6632dc 100644 --- a/commons/zenoh-macros/build.rs +++ b/commons/zenoh-macros/build.rs @@ -25,7 +25,6 @@ fn main() { .create(true) .truncate(true) .write(true) - .truncate(true) .open(version_rs) .unwrap(); version_rs.write_all(&output.stdout).unwrap(); From ce5b6108537599424f5ab0d6da9887b05f966e59 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 11:20:29 +0100 Subject: [PATCH 060/357] sample api for GetBuilder --- examples/examples/z_ping.rs | 2 +- examples/examples/z_pong.rs | 2 +- examples/examples/z_pub_thr.rs | 2 +- plugins/zenoh-plugin-rest/src/lib.rs | 2 +- .../src/replica/align_queryable.rs | 4 +- .../src/replica/aligner.rs | 2 +- .../src/replica/storage.rs | 4 +- zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh/src/liveliness.rs | 5 +- zenoh/src/net/runtime/adminspace.rs | 4 +- zenoh/src/publication.rs | 38 ++++--- zenoh/src/query.rs | 100 ++++++++++++++---- zenoh/src/queryable.rs | 32 +++--- zenoh/src/sample.rs | 75 +++++++++---- zenoh/src/sample_builder.rs | 62 ++++++----- zenoh/src/session.rs | 9 +- zenoh/src/value.rs | 26 +++-- 17 files changed, 249 insertions(+), 122 deletions(-) diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index b40afc1f53..79a1e16514 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -36,7 +36,7 @@ fn main() { let publisher = session .declare_publisher(key_expr_ping) .congestion_control(CongestionControl::Block) - .express(express) + .is_express(express) .res() .unwrap(); diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index 0003958b5d..a629cce3cf 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -35,7 +35,7 @@ fn main() { let 
publisher = session .declare_publisher(key_expr_pong) .congestion_control(CongestionControl::Block) - .express(express) + .is_express(express) .res() .unwrap(); diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 7e7c1ac9b5..c9b9fe64f3 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -42,7 +42,7 @@ fn main() { .declare_publisher("test/thr") .congestion_control(CongestionControl::Block) .priority(prio) - .express(args.express) + .is_express(args.express) .res() .unwrap(); diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 8a85f14caa..74da23679f 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -34,7 +34,7 @@ use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; -use zenoh::sample_builder::PutSampleBuilderTrait; +use zenoh::sample_builder::ValueBuilderTrait; use zenoh::selector::TIME_RANGE_KEY; use zenoh::Session; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 691fabd7a7..e5c4840666 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -20,8 +20,8 @@ use std::str; use std::str::FromStr; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; -use zenoh::sample_builder::PutSampleBuilderTrait; -use zenoh::sample_builder::SampleBuilderTrait; +use zenoh::sample_builder::TimestampBuilderTrait; +use zenoh::sample_builder::ValueBuilderTrait; use zenoh::time::Timestamp; use zenoh::Session; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index a899196e7e..4119a941e5 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -21,7 +21,7 @@ use std::str; use zenoh::key_expr::{KeyExpr, OwnedKeyExpr}; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; -use zenoh::sample_builder::{PutSampleBuilder, PutSampleBuilderTrait, SampleBuilderTrait}; +use zenoh::sample_builder::{PutSampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}; use zenoh::time::Timestamp; use zenoh::Session; use zenoh_core::{AsyncResolve, SyncResolve}; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 04a707bfda..69c973de39 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -27,7 +27,7 @@ use zenoh::key_expr::KeyExpr; use zenoh::query::{ConsolidationMode, QueryTarget}; use zenoh::sample::{Sample, SampleKind}; use zenoh::sample_builder::{ - DeleteSampleBuilder, PutSampleBuilder, PutSampleBuilderTrait, SampleBuilder, SampleBuilderTrait, + DeleteSampleBuilder, PutSampleBuilder, SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait, }; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::value::Value; @@ -719,7 +719,7 @@ fn construct_update(data: String) -> Update { for slice in result.3 { payload.push_zslice(slice.to_vec().into()); } - let value = 
Value::new(payload).with_encoding(result.2); + let value = Value::new(payload).with_encoding(result.2.into()); let data = StoredData { value, timestamp: Timestamp::from_str(&result.1).unwrap(), // @TODO: remove the unwrap() diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 5c302840b8..e6b269cfbd 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -20,7 +20,7 @@ use std::time::Duration; use zenoh::handlers::{locked, DefaultHandler}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; -use zenoh::sample_builder::{SampleBuilder, SampleBuilderTrait}; +use zenoh::sample_builder::{SampleBuilder, TimestampBuilderTrait}; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::{new_reception_timestamp, Timestamp}; use zenoh::Result as ZResult; diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index 425aa62592..6aac3d3908 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -15,6 +15,8 @@ //! Liveliness primitives. //! //! see [`Liveliness`] +use zenoh_protocol::network::request; + use crate::{query::Reply, Id}; #[zenoh_macros::unstable] @@ -740,18 +742,19 @@ where { fn res_sync(self) -> ::To { let (callback, receiver) = self.handler.into_handler(); - self.session .query( &self.key_expr?.into(), &Some(KeyExpr::from(*KE_PREFIX_LIVELINESS)), QueryTarget::DEFAULT, QueryConsolidation::DEFAULT, + request::ext::QoSType::REQUEST.into(), Locality::default(), self.timeout, None, #[cfg(feature = "unstable")] None, + SourceInfo::empty(), callback, ) .map(|_| receiver) diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 2a2b318cde..9047e8b112 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -20,7 +20,7 @@ use crate::plugins::sealed::{self as plugins}; use crate::prelude::sync::SyncResolve; use crate::queryable::Query; use crate::queryable::QueryInner; -use crate::sample_builder::PutSampleBuilderTrait; +use crate::sample_builder::ValueBuilderTrait; use crate::value::Value; use async_std::task; use log::{error, trace}; @@ -426,7 +426,7 @@ impl Primitives for AdminSpace { parameters, value: query .ext_body - .map(|b| Value::from(b.payload).with_encoding(b.encoding)), + .map(|b| Value::from(b.payload).with_encoding(b.encoding.into())), qid: msg.id, zid, primitives, diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 8772319593..81a12133ed 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -19,7 +19,7 @@ use crate::prelude::*; use crate::sample::Attachment; use crate::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; use crate::sample_builder::{ - DeleteSampleBuilderTrait, PutSampleBuilderTrait, QoSBuilderTrait, SampleBuilderTrait, + QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, }; use crate::SessionRef; use crate::Undeclarable; @@ -114,9 +114,9 @@ impl QoSBuilderTrait for PutBuilder<'_, '_> { } } #[inline] - fn express(self, is_express: bool) -> Self { + fn is_express(self, is_express: bool) -> Self { Self { - publisher: self.publisher.express(is_express), + publisher: self.publisher.is_express(is_express), ..self } } @@ -138,15 +138,15 @@ impl QoSBuilderTrait for DeleteBuilder<'_, '_> { } } #[inline] - fn express(self, is_express: bool) -> Self { + fn is_express(self, is_express: bool) -> Self { Self { - publisher: self.publisher.express(is_express), + publisher: 
self.publisher.is_express(is_express), ..self } } } -impl SampleBuilderTrait for PutBuilder<'_, '_> { +impl TimestampBuilderTrait for PutBuilder<'_, '_> { fn with_timestamp_opt(self, timestamp: Option) -> Self { Self { timestamp, ..self } } @@ -156,6 +156,9 @@ impl SampleBuilderTrait for PutBuilder<'_, '_> { ..self } } +} + +impl SampleBuilderTrait for PutBuilder<'_, '_> { #[cfg(feature = "unstable")] fn with_source_info(self, source_info: SourceInfo) -> Self { Self { @@ -176,7 +179,7 @@ impl SampleBuilderTrait for PutBuilder<'_, '_> { } } -impl SampleBuilderTrait for DeleteBuilder<'_, '_> { +impl TimestampBuilderTrait for DeleteBuilder<'_, '_> { fn with_timestamp_opt(self, timestamp: Option) -> Self { Self { timestamp, ..self } } @@ -186,6 +189,9 @@ impl SampleBuilderTrait for DeleteBuilder<'_, '_> { ..self } } +} + +impl SampleBuilderTrait for DeleteBuilder<'_, '_> { #[cfg(feature = "unstable")] fn with_source_info(self, source_info: SourceInfo) -> Self { Self { @@ -206,7 +212,7 @@ impl SampleBuilderTrait for DeleteBuilder<'_, '_> { } } -impl PutSampleBuilderTrait for PutBuilder<'_, '_> { +impl ValueBuilderTrait for PutBuilder<'_, '_> { fn with_encoding(self, encoding: Encoding) -> Self { Self { encoding, ..self } } @@ -222,8 +228,6 @@ impl PutSampleBuilderTrait for PutBuilder<'_, '_> { } } -impl DeleteSampleBuilderTrait for DeleteBuilder<'_, '_> {} - impl PutBuilder<'_, '_> { /// Restrict the matching subscribers that will receive the published data /// to the ones that have the given [`Locality`](crate::prelude::Locality). @@ -761,7 +765,7 @@ pub struct DeletePublication<'a> { pub(crate) attachment: Option, } -impl SampleBuilderTrait for PutPublication<'_> { +impl TimestampBuilderTrait for PutPublication<'_> { fn with_timestamp_opt(self, timestamp: Option) -> Self { Self { timestamp, ..self } } @@ -772,7 +776,9 @@ impl SampleBuilderTrait for PutPublication<'_> { ..self } } +} +impl SampleBuilderTrait for PutPublication<'_> { #[cfg(feature = "unstable")] fn with_source_info(self, source_info: SourceInfo) -> Self { Self { @@ -795,7 +801,7 @@ impl SampleBuilderTrait for PutPublication<'_> { } } -impl PutSampleBuilderTrait for PutPublication<'_> { +impl ValueBuilderTrait for PutPublication<'_> { fn with_encoding(self, encoding: Encoding) -> Self { Self { encoding, ..self } } @@ -811,7 +817,7 @@ impl PutSampleBuilderTrait for PutPublication<'_> { } } -impl SampleBuilderTrait for DeletePublication<'_> { +impl TimestampBuilderTrait for DeletePublication<'_> { fn with_timestamp_opt(self, timestamp: Option) -> Self { Self { timestamp, ..self } } @@ -822,7 +828,9 @@ impl SampleBuilderTrait for DeletePublication<'_> { ..self } } +} +impl SampleBuilderTrait for DeletePublication<'_> { #[cfg(feature = "unstable")] fn with_source_info(self, source_info: SourceInfo) -> Self { Self { @@ -845,8 +853,6 @@ impl SampleBuilderTrait for DeletePublication<'_> { } } -impl DeleteSampleBuilderTrait for DeletePublication<'_> {} - impl Resolvable for PutPublication<'_> { type To = ZResult<()>; } @@ -1010,7 +1016,7 @@ impl QoSBuilderTrait for PublisherBuilder<'_, '_> { /// When express is set to `true`, then the message will not be batched. /// This usually has a positive impact on latency but negative impact on throughput. 
#[inline] - fn express(self, is_express: bool) -> Self { + fn is_express(self, is_express: bool) -> Self { Self { is_express, ..self } } } diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index fe48748ad4..6a0c4b1933 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -17,6 +17,8 @@ use crate::handlers::{locked, Callback, DefaultHandler}; use crate::prelude::*; #[zenoh_macros::unstable] use crate::sample::Attachment; +use crate::sample::QoSBuilder; +use crate::sample_builder::{QoSBuilderTrait, SampleBuilderTrait, ValueBuilderTrait}; use crate::Session; use std::collections::HashMap; use std::future::Ready; @@ -120,12 +122,70 @@ pub struct GetBuilder<'a, 'b, Handler> { pub(crate) scope: ZResult>>, pub(crate) target: QueryTarget, pub(crate) consolidation: QueryConsolidation, + pub(crate) qos: QoSBuilder, pub(crate) destination: Locality, pub(crate) timeout: Duration, pub(crate) handler: Handler, pub(crate) value: Option, #[cfg(feature = "unstable")] pub(crate) attachment: Option, + #[cfg(feature = "unstable")] + pub(crate) source_info: SourceInfo, +} + +impl SampleBuilderTrait for GetBuilder<'_, '_, Handler> { + #[cfg(feature = "unstable")] + fn with_source_info(self, source_info: SourceInfo) -> Self { + Self { + source_info, + ..self + } + } + + #[cfg(feature = "unstable")] + fn with_attachment_opt(self, attachment: Option) -> Self { + Self { attachment, ..self } + } + + #[cfg(feature = "unstable")] + fn with_attachment(self, attachment: Attachment) -> Self { + Self { + attachment: Some(attachment), + ..self + } + } +} + +impl QoSBuilderTrait for GetBuilder<'_, '_, DefaultHandler> { + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + let qos = self.qos.congestion_control(congestion_control); + Self { qos, ..self } + } + + fn priority(self, priority: Priority) -> Self { + let qos = self.qos.priority(priority); + Self { qos, ..self } + } + + fn is_express(self, is_express: bool) -> Self { + let qos = self.qos.is_express(is_express); + Self { qos, ..self } + } +} + +impl ValueBuilderTrait for GetBuilder<'_, '_, Handler> { + fn with_encoding(self, encoding: Encoding) -> Self { + let value = Some(self.value.unwrap_or_default().with_encoding(encoding)); + Self { value, ..self } + } + + fn with_payload(self, payload: IntoPayload) -> Self + where + IntoPayload: Into, + { + let value = Some(self.value.unwrap_or_default().with_payload(payload)); + Self { value, ..self } + } } impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { @@ -156,11 +216,14 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { scope, target, consolidation, + qos, destination, timeout, value, #[cfg(feature = "unstable")] attachment, + #[cfg(feature = "unstable")] + source_info, handler: _, } = self; GetBuilder { @@ -169,11 +232,14 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { scope, target, consolidation, + qos, destination, timeout, value, #[cfg(feature = "unstable")] attachment, + #[cfg(feature = "unstable")] + source_info, handler: callback, } } @@ -239,11 +305,14 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { scope, target, consolidation, + qos, destination, timeout, value, #[cfg(feature = "unstable")] attachment, + #[cfg(feature = "unstable")] + source_info, handler: _, } = self; GetBuilder { @@ -252,11 +321,14 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { scope, target, consolidation, + qos, destination, timeout, value, #[cfg(feature = "unstable")] attachment, + #[cfg(feature = "unstable")] + source_info, handler, } } @@ -315,29 +387,11 @@ impl<'a, 'b, Handler> 
GetBuilder<'a, 'b, Handler> { /// expressions that don't intersect with the query's. #[zenoh_macros::unstable] pub fn accept_replies(self, accept: ReplyKeyExpr) -> Self { - let Self { - session, - selector, - scope, - target, - consolidation, - destination, - timeout, - value, - attachment, - handler, - } = self; Self { - session, - selector: selector.and_then(|s| s.accept_any_keyexpr(accept == ReplyKeyExpr::Any)), - scope, - target, - consolidation, - destination, - timeout, - value, - attachment, - handler, + selector: self + .selector + .and_then(|s| s.accept_any_keyexpr(accept == ReplyKeyExpr::Any)), + ..self } } } @@ -382,11 +436,13 @@ where &self.scope?, self.target, self.consolidation, + self.qos.into(), self.destination, self.timeout, self.value, #[cfg(feature = "unstable")] self.attachment, + self.source_info, callback, ) .map(|_| receiver) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 14e9d09068..a9b469a340 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -20,8 +20,8 @@ use crate::net::primitives::Primitives; use crate::prelude::*; use crate::sample::SourceInfo; use crate::sample_builder::{ - DeleteSampleBuilder, DeleteSampleBuilderTrait, PutSampleBuilder, PutSampleBuilderTrait, - QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, + DeleteSampleBuilder, PutSampleBuilder, QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, + TimestampBuilderTrait, ValueBuilderTrait, }; use crate::Id; use crate::SessionRef; @@ -238,7 +238,7 @@ impl<'a> ReplySampleBuilder<'a> { } } -impl SampleBuilderTrait for ReplySampleBuilder<'_> { +impl TimestampBuilderTrait for ReplySampleBuilder<'_> { fn with_timestamp_opt(self, timestamp: Option) -> Self { Self { sample_builder: self.sample_builder.with_timestamp_opt(timestamp), @@ -252,7 +252,9 @@ impl SampleBuilderTrait for ReplySampleBuilder<'_> { ..self } } +} +impl SampleBuilderTrait for ReplySampleBuilder<'_> { #[cfg(feature = "unstable")] fn with_source_info(self, source_info: SourceInfo) -> Self { Self { @@ -293,9 +295,9 @@ impl QoSBuilderTrait for ReplySampleBuilder<'_> { } } - fn express(self, is_express: bool) -> Self { + fn is_express(self, is_express: bool) -> Self { Self { - sample_builder: self.sample_builder.express(is_express), + sample_builder: self.sample_builder.is_express(is_express), ..self } } @@ -328,7 +330,7 @@ pub struct ReplyBuilder<'a> { sample_builder: PutSampleBuilder, } -impl SampleBuilderTrait for ReplyBuilder<'_> { +impl TimestampBuilderTrait for ReplyBuilder<'_> { fn with_timestamp_opt(self, timestamp: Option) -> Self { Self { sample_builder: self.sample_builder.with_timestamp_opt(timestamp), @@ -342,7 +344,9 @@ impl SampleBuilderTrait for ReplyBuilder<'_> { ..self } } +} +impl SampleBuilderTrait for ReplyBuilder<'_> { #[cfg(feature = "unstable")] fn with_source_info(self, source_info: SourceInfo) -> Self { Self { @@ -383,15 +387,15 @@ impl QoSBuilderTrait for ReplyBuilder<'_> { } } - fn express(self, is_express: bool) -> Self { + fn is_express(self, is_express: bool) -> Self { Self { - sample_builder: self.sample_builder.express(is_express), + sample_builder: self.sample_builder.is_express(is_express), ..self } } } -impl PutSampleBuilderTrait for ReplyBuilder<'_> { +impl ValueBuilderTrait for ReplyBuilder<'_> { fn with_encoding(self, encoding: Encoding) -> Self { Self { sample_builder: self.sample_builder.with_encoding(encoding), @@ -418,7 +422,7 @@ pub struct ReplyDelBuilder<'a> { sample_builder: DeleteSampleBuilder, } -impl SampleBuilderTrait for ReplyDelBuilder<'_> { +impl 
TimestampBuilderTrait for ReplyDelBuilder<'_> { fn with_timestamp_opt(self, timestamp: Option) -> Self { Self { sample_builder: self.sample_builder.with_timestamp_opt(timestamp), @@ -432,7 +436,9 @@ impl SampleBuilderTrait for ReplyDelBuilder<'_> { ..self } } +} +impl SampleBuilderTrait for ReplyDelBuilder<'_> { #[cfg(feature = "unstable")] fn with_source_info(self, source_info: SourceInfo) -> Self { Self { @@ -473,16 +479,14 @@ impl QoSBuilderTrait for ReplyDelBuilder<'_> { } } - fn express(self, is_express: bool) -> Self { + fn is_express(self, is_express: bool) -> Self { Self { - sample_builder: self.sample_builder.express(is_express), + sample_builder: self.sample_builder.is_express(is_express), ..self } } } -impl DeleteSampleBuilderTrait for ReplyDelBuilder<'_> {} - /// A builder returned by [`Query::reply_err()`](Query::reply_err). #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 1998f3e844..2dbeebe717 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -16,13 +16,16 @@ use crate::encoding::Encoding; use crate::payload::Payload; use crate::prelude::{KeyExpr, Value}; +use crate::sample_builder::{QoSBuilderTrait, ValueBuilderTrait}; use crate::time::Timestamp; use crate::Priority; #[zenoh_macros::unstable] use serde::Serialize; use std::{convert::TryFrom, fmt}; +use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_protocol::core::EntityGlobalId; -use zenoh_protocol::{core::CongestionControl, network::push::ext::QoSType, zenoh}; +use zenoh_protocol::network::declare::ext::QoSType; +use zenoh_protocol::{core::CongestionControl, zenoh}; pub type SourceSn = u64; @@ -566,6 +569,58 @@ pub struct QoS { inner: QoSType, } +#[derive(Debug)] +pub struct QoSBuilder(QoS); + +impl From for QoSBuilder { + fn from(qos: QoS) -> Self { + QoSBuilder(qos) + } +} + +impl From for QoS { + fn from(builder: QoSBuilder) -> Self { + builder.0 + } +} + +impl Resolvable for QoSBuilder { + type To = QoS; +} + +impl SyncResolve for QoSBuilder { + fn res_sync(self) -> ::To { + self.0 + } +} + +impl AsyncResolve for QoSBuilder { + type Future = futures::future::Ready; + fn res_async(self) -> Self::Future { + futures::future::ready(self.0) + } +} + +impl QoSBuilderTrait for QoSBuilder { + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + let mut inner = self.0.inner; + inner.set_congestion_control(congestion_control); + Self(QoS { inner }) + } + + fn priority(self, priority: Priority) -> Self { + let mut inner = self.0.inner; + inner.set_priority(priority.into()); + Self(QoS { inner }) + } + + fn is_express(self, is_express: bool) -> Self { + let mut inner = self.0.inner; + inner.set_is_express(is_express); + Self(QoS { inner }) + } +} + impl QoS { /// Gets priority of the message. pub fn priority(&self) -> Priority { @@ -590,24 +645,6 @@ impl QoS { pub fn express(&self) -> bool { self.inner.is_express() } - - /// Sets priority value. - pub fn with_priority(mut self, priority: Priority) -> Self { - self.inner.set_priority(priority.into()); - self - } - - /// Sets congestion control value. - pub fn with_congestion_control(mut self, congestion_control: CongestionControl) -> Self { - self.inner.set_congestion_control(congestion_control); - self - } - - /// Sets express flag vlaue. 
- pub fn with_express(mut self, is_express: bool) -> Self { - self.inner.set_is_express(is_express); - self - } } impl From for QoS { diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index 7e38e84afd..5aca7ff1da 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -14,6 +14,7 @@ use crate::sample::Attachment; use crate::sample::QoS; +use crate::sample::QoSBuilder; use crate::sample::SourceInfo; use crate::Encoding; use crate::KeyExpr; @@ -36,14 +37,17 @@ pub trait QoSBuilderTrait { /// Change the `express` policy to apply when routing the data. /// When express is set to `true`, then the message will not be batched. /// This usually has a positive impact on latency but negative impact on throughput. - fn express(self, is_express: bool) -> Self; + fn is_express(self, is_express: bool) -> Self; } -pub trait SampleBuilderTrait { +pub trait TimestampBuilderTrait { /// Sets of clears timestamp fn with_timestamp_opt(self, timestamp: Option) -> Self; /// Sets timestamp fn with_timestamp(self, timestamp: Timestamp) -> Self; +} + +pub trait SampleBuilderTrait { /// Attach source information #[zenoh_macros::unstable] fn with_source_info(self, source_info: SourceInfo) -> Self; @@ -55,16 +59,15 @@ pub trait SampleBuilderTrait { fn with_attachment(self, attachment: Attachment) -> Self; } -pub trait PutSampleBuilderTrait: SampleBuilderTrait { +pub trait ValueBuilderTrait { /// Set the [`Encoding`] fn with_encoding(self, encoding: Encoding) -> Self; + /// Sets the payload fn with_payload(self, payload: IntoPayload) -> Self where IntoPayload: Into; } -pub trait DeleteSampleBuilderTrait: SampleBuilderTrait {} - #[derive(Debug)] pub struct SampleBuilder(Sample); @@ -98,7 +101,7 @@ impl SampleBuilder { } } -impl SampleBuilderTrait for SampleBuilder { +impl TimestampBuilderTrait for SampleBuilder { fn with_timestamp_opt(self, timestamp: Option) -> Self { Self(Sample { timestamp, @@ -109,7 +112,9 @@ impl SampleBuilderTrait for SampleBuilder { fn with_timestamp(self, timestamp: Timestamp) -> Self { self.with_timestamp_opt(Some(timestamp)) } +} +impl SampleBuilderTrait for SampleBuilder { #[zenoh_macros::unstable] fn with_source_info(self, source_info: SourceInfo) -> Self { Self(Sample { @@ -134,22 +139,19 @@ impl SampleBuilderTrait for SampleBuilder { impl QoSBuilderTrait for SampleBuilder { fn congestion_control(self, congestion_control: CongestionControl) -> Self { - Self(Sample { - qos: self.0.qos.with_congestion_control(congestion_control), - ..self.0 - }) + let qos: QoSBuilder = self.0.qos.into(); + let qos = qos.congestion_control(congestion_control).res_sync(); + Self(Sample { qos, ..self.0 }) } fn priority(self, priority: Priority) -> Self { - Self(Sample { - qos: self.0.qos.with_priority(priority), - ..self.0 - }) + let qos: QoSBuilder = self.0.qos.into(); + let qos = qos.priority(priority).res_sync(); + Self(Sample { qos, ..self.0 }) } - fn express(self, is_express: bool) -> Self { - Self(Sample { - qos: self.0.qos.with_express(is_express), - ..self.0 - }) + fn is_express(self, is_express: bool) -> Self { + let qos: QoSBuilder = self.0.qos.into(); + let qos = qos.is_express(is_express).res_sync(); + Self(Sample { qos, ..self.0 }) } } @@ -197,13 +199,16 @@ impl PutSampleBuilder { } } -impl SampleBuilderTrait for PutSampleBuilder { +impl TimestampBuilderTrait for PutSampleBuilder { fn with_timestamp(self, timestamp: Timestamp) -> Self { Self(self.0.with_timestamp(timestamp)) } fn with_timestamp_opt(self, timestamp: Option) -> Self { 
Self(self.0.with_timestamp_opt(timestamp)) } +} + +impl SampleBuilderTrait for PutSampleBuilder { #[zenoh_macros::unstable] fn with_source_info(self, source_info: SourceInfo) -> Self { Self(self.0.with_source_info(source_info)) @@ -225,12 +230,12 @@ impl QoSBuilderTrait for PutSampleBuilder { fn priority(self, priority: Priority) -> Self { Self(self.0.priority(priority)) } - fn express(self, is_express: bool) -> Self { - Self(self.0.express(is_express)) + fn is_express(self, is_express: bool) -> Self { + Self(self.0.is_express(is_express)) } } -impl PutSampleBuilderTrait for PutSampleBuilder { +impl ValueBuilderTrait for PutSampleBuilder { fn with_encoding(self, encoding: Encoding) -> Self { Self(SampleBuilder(Sample { encoding, @@ -291,13 +296,16 @@ impl DeleteSampleBuilder { } } -impl SampleBuilderTrait for DeleteSampleBuilder { +impl TimestampBuilderTrait for DeleteSampleBuilder { fn with_timestamp(self, timestamp: Timestamp) -> Self { Self(self.0.with_timestamp(timestamp)) } fn with_timestamp_opt(self, timestamp: Option) -> Self { Self(self.0.with_timestamp_opt(timestamp)) } +} + +impl SampleBuilderTrait for DeleteSampleBuilder { #[zenoh_macros::unstable] fn with_source_info(self, source_info: SourceInfo) -> Self { Self(self.0.with_source_info(source_info)) @@ -319,13 +327,11 @@ impl QoSBuilderTrait for DeleteSampleBuilder { fn priority(self, priority: Priority) -> Self { Self(self.0.priority(priority)) } - fn express(self, is_express: bool) -> Self { - Self(self.0.express(is_express)) + fn is_express(self, is_express: bool) -> Self { + Self(self.0.is_express(is_express)) } } -impl DeleteSampleBuilderTrait for DeleteSampleBuilder {} - impl From for SampleBuilder { fn from(sample: Sample) -> Self { SampleBuilder(sample) diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index ffe7036050..5b80adb0e5 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -784,18 +784,21 @@ impl Session { let conf = self.runtime.config().lock(); Duration::from_millis(unwrap_or_default!(conf.queries_default_timeout())) }; + let qos: QoS = request::ext::QoSType::REQUEST.into(); GetBuilder { session: self, selector, scope: Ok(None), target: QueryTarget::DEFAULT, consolidation: QueryConsolidation::DEFAULT, + qos: qos.into(), destination: Locality::default(), timeout, value: None, #[cfg(feature = "unstable")] attachment: None, handler: DefaultHandler, + source_info: SourceInfo::empty(), } } } @@ -1567,10 +1570,12 @@ impl Session { scope: &Option>, target: QueryTarget, consolidation: QueryConsolidation, + qos: QoS, destination: Locality, timeout: Duration, value: Option, #[cfg(feature = "unstable")] attachment: Option, + #[cfg(feature = "unstable")] source: SourceInfo, callback: Callback<'static, Reply>, ) -> ZResult<()> { log::trace!("get({}, {:?}, {:?})", selector, target, consolidation); @@ -1649,7 +1654,7 @@ impl Session { primitives.send_request(Request { id: qid, wire_expr: wexpr.clone(), - ext_qos: request::ext::QoSType::REQUEST, + ext_qos: qos.into(), ext_tstamp: None, ext_nodeid: request::ext::NodeIdType::DEFAULT, ext_target: target, @@ -1658,7 +1663,7 @@ impl Session { payload: RequestBody::Query(zenoh_protocol::zenoh::Query { consolidation, parameters: selector.parameters().to_string(), - ext_sinfo: None, + ext_sinfo: source.into(), ext_body: value.as_ref().map(|v| query::ext::QueryBodyType { #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/zenoh/src/value.rs b/zenoh/src/value.rs index 128f0ff605..2d98cbf398 100644 --- a/zenoh/src/value.rs +++ b/zenoh/src/value.rs @@ -13,7 
+13,7 @@ // //! Value primitives. -use crate::{encoding::Encoding, payload::Payload}; +use crate::{encoding::Encoding, payload::Payload, sample_builder::ValueBuilderTrait}; /// A zenoh [`Value`] contains a `payload` and an [`Encoding`] that indicates how the [`Payload`] should be interpreted. #[non_exhaustive] @@ -36,7 +36,6 @@ impl Value { encoding: Encoding::default(), } } - /// Creates an empty [`Value`]. pub const fn empty() -> Self { Value { @@ -44,15 +43,20 @@ impl Value { encoding: Encoding::default(), } } +} - /// Sets the encoding of this [`Value`]`. - #[inline(always)] - pub fn with_encoding(mut self, encoding: IntoEncoding) -> Self +impl ValueBuilderTrait for Value { + fn with_encoding(self, encoding: Encoding) -> Self { + Self { encoding, ..self } + } + fn with_payload(self, payload: IntoPayload) -> Self where - IntoEncoding: Into, + IntoPayload: Into, { - self.encoding = encoding.into(); - self + Self { + payload: payload.into(), + ..self + } } } @@ -67,3 +71,9 @@ where } } } + +impl Default for Value { + fn default() -> Self { + Value::empty() + } +} From 0bce160e13947dacb6ed110b571578a93b70b8ae Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 11:23:52 +0100 Subject: [PATCH 061/357] restored "express" name --- examples/examples/z_ping.rs | 2 +- examples/examples/z_pong.rs | 2 +- examples/examples/z_pub_thr.rs | 2 +- zenoh/src/publication.rs | 10 +++++----- zenoh/src/query.rs | 4 ++-- zenoh/src/queryable.rs | 12 ++++++------ zenoh/src/sample.rs | 2 +- zenoh/src/sample_builder.rs | 14 +++++++------- 8 files changed, 24 insertions(+), 24 deletions(-) diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index 79a1e16514..b40afc1f53 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -36,7 +36,7 @@ fn main() { let publisher = session .declare_publisher(key_expr_ping) .congestion_control(CongestionControl::Block) - .is_express(express) + .express(express) .res() .unwrap(); diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index a629cce3cf..0003958b5d 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -35,7 +35,7 @@ fn main() { let publisher = session .declare_publisher(key_expr_pong) .congestion_control(CongestionControl::Block) - .is_express(express) + .express(express) .res() .unwrap(); diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index c9b9fe64f3..7e7c1ac9b5 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -42,7 +42,7 @@ fn main() { .declare_publisher("test/thr") .congestion_control(CongestionControl::Block) .priority(prio) - .is_express(args.express) + .express(args.express) .res() .unwrap(); diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 81a12133ed..e60e40d295 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -114,9 +114,9 @@ impl QoSBuilderTrait for PutBuilder<'_, '_> { } } #[inline] - fn is_express(self, is_express: bool) -> Self { + fn express(self, is_express: bool) -> Self { Self { - publisher: self.publisher.is_express(is_express), + publisher: self.publisher.express(is_express), ..self } } @@ -138,9 +138,9 @@ impl QoSBuilderTrait for DeleteBuilder<'_, '_> { } } #[inline] - fn is_express(self, is_express: bool) -> Self { + fn express(self, is_express: bool) -> Self { Self { - publisher: self.publisher.is_express(is_express), + publisher: self.publisher.express(is_express), ..self } } @@ -1016,7 +1016,7 @@ impl QoSBuilderTrait for PublisherBuilder<'_, 
'_> { /// When express is set to `true`, then the message will not be batched. /// This usually has a positive impact on latency but negative impact on throughput. #[inline] - fn is_express(self, is_express: bool) -> Self { + fn express(self, is_express: bool) -> Self { Self { is_express, ..self } } } diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 6a0c4b1933..db17715a89 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -167,8 +167,8 @@ impl QoSBuilderTrait for GetBuilder<'_, '_, DefaultHandler> { Self { qos, ..self } } - fn is_express(self, is_express: bool) -> Self { - let qos = self.qos.is_express(is_express); + fn express(self, is_express: bool) -> Self { + let qos = self.qos.express(is_express); Self { qos, ..self } } } diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index a9b469a340..d9327415f5 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -295,9 +295,9 @@ impl QoSBuilderTrait for ReplySampleBuilder<'_> { } } - fn is_express(self, is_express: bool) -> Self { + fn express(self, is_express: bool) -> Self { Self { - sample_builder: self.sample_builder.is_express(is_express), + sample_builder: self.sample_builder.express(is_express), ..self } } @@ -387,9 +387,9 @@ impl QoSBuilderTrait for ReplyBuilder<'_> { } } - fn is_express(self, is_express: bool) -> Self { + fn express(self, is_express: bool) -> Self { Self { - sample_builder: self.sample_builder.is_express(is_express), + sample_builder: self.sample_builder.express(is_express), ..self } } @@ -479,9 +479,9 @@ impl QoSBuilderTrait for ReplyDelBuilder<'_> { } } - fn is_express(self, is_express: bool) -> Self { + fn express(self, is_express: bool) -> Self { Self { - sample_builder: self.sample_builder.is_express(is_express), + sample_builder: self.sample_builder.express(is_express), ..self } } diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 2dbeebe717..d774e5e007 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -614,7 +614,7 @@ impl QoSBuilderTrait for QoSBuilder { Self(QoS { inner }) } - fn is_express(self, is_express: bool) -> Self { + fn express(self, is_express: bool) -> Self { let mut inner = self.0.inner; inner.set_is_express(is_express); Self(QoS { inner }) diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index 5aca7ff1da..b13bfce346 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -37,7 +37,7 @@ pub trait QoSBuilderTrait { /// Change the `express` policy to apply when routing the data. /// When express is set to `true`, then the message will not be batched. /// This usually has a positive impact on latency but negative impact on throughput. 
- fn is_express(self, is_express: bool) -> Self; + fn express(self, is_express: bool) -> Self; } pub trait TimestampBuilderTrait { @@ -148,9 +148,9 @@ impl QoSBuilderTrait for SampleBuilder { let qos = qos.priority(priority).res_sync(); Self(Sample { qos, ..self.0 }) } - fn is_express(self, is_express: bool) -> Self { + fn express(self, is_express: bool) -> Self { let qos: QoSBuilder = self.0.qos.into(); - let qos = qos.is_express(is_express).res_sync(); + let qos = qos.express(is_express).res_sync(); Self(Sample { qos, ..self.0 }) } } @@ -230,8 +230,8 @@ impl QoSBuilderTrait for PutSampleBuilder { fn priority(self, priority: Priority) -> Self { Self(self.0.priority(priority)) } - fn is_express(self, is_express: bool) -> Self { - Self(self.0.is_express(is_express)) + fn express(self, is_express: bool) -> Self { + Self(self.0.express(is_express)) } } @@ -327,8 +327,8 @@ impl QoSBuilderTrait for DeleteSampleBuilder { fn priority(self, priority: Priority) -> Self { Self(self.0.priority(priority)) } - fn is_express(self, is_express: bool) -> Self { - Self(self.0.is_express(is_express)) + fn express(self, is_express: bool) -> Self { + Self(self.0.express(is_express)) } } From 3620c3a7d057c312ff8354bffef40f79424aee80 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 11:32:11 +0100 Subject: [PATCH 062/357] removed 'timestamp_opt' --- zenoh/src/publication.rs | 34 ++++------------------------------ zenoh/src/queryable.rs | 27 +++------------------------ zenoh/src/sample_builder.rs | 20 ++++---------------- 3 files changed, 11 insertions(+), 70 deletions(-) diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index e60e40d295..f8a42077b9 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -147,15 +147,9 @@ impl QoSBuilderTrait for DeleteBuilder<'_, '_> { } impl TimestampBuilderTrait for PutBuilder<'_, '_> { - fn with_timestamp_opt(self, timestamp: Option) -> Self { + fn with_timestamp(self, timestamp: Option) -> Self { Self { timestamp, ..self } } - fn with_timestamp(self, timestamp: uhlc::Timestamp) -> Self { - Self { - timestamp: Some(timestamp), - ..self - } - } } impl SampleBuilderTrait for PutBuilder<'_, '_> { @@ -180,15 +174,9 @@ impl SampleBuilderTrait for PutBuilder<'_, '_> { } impl TimestampBuilderTrait for DeleteBuilder<'_, '_> { - fn with_timestamp_opt(self, timestamp: Option) -> Self { + fn with_timestamp(self, timestamp: Option) -> Self { Self { timestamp, ..self } } - fn with_timestamp(self, timestamp: uhlc::Timestamp) -> Self { - Self { - timestamp: Some(timestamp), - ..self - } - } } impl SampleBuilderTrait for DeleteBuilder<'_, '_> { @@ -766,16 +754,9 @@ pub struct DeletePublication<'a> { } impl TimestampBuilderTrait for PutPublication<'_> { - fn with_timestamp_opt(self, timestamp: Option) -> Self { + fn with_timestamp(self, timestamp: Option) -> Self { Self { timestamp, ..self } } - - fn with_timestamp(self, timestamp: uhlc::Timestamp) -> Self { - Self { - timestamp: Some(timestamp), - ..self - } - } } impl SampleBuilderTrait for PutPublication<'_> { @@ -818,16 +799,9 @@ impl ValueBuilderTrait for PutPublication<'_> { } impl TimestampBuilderTrait for DeletePublication<'_> { - fn with_timestamp_opt(self, timestamp: Option) -> Self { + fn with_timestamp(self, timestamp: Option) -> Self { Self { timestamp, ..self } } - - fn with_timestamp(self, timestamp: uhlc::Timestamp) -> Self { - Self { - timestamp: Some(timestamp), - ..self - } - } } impl SampleBuilderTrait for DeletePublication<'_> { diff --git a/zenoh/src/queryable.rs 
b/zenoh/src/queryable.rs index d9327415f5..625ae6f25f 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -239,14 +239,7 @@ impl<'a> ReplySampleBuilder<'a> { } impl TimestampBuilderTrait for ReplySampleBuilder<'_> { - fn with_timestamp_opt(self, timestamp: Option) -> Self { - Self { - sample_builder: self.sample_builder.with_timestamp_opt(timestamp), - ..self - } - } - - fn with_timestamp(self, timestamp: Timestamp) -> Self { + fn with_timestamp(self, timestamp: Option) -> Self { Self { sample_builder: self.sample_builder.with_timestamp(timestamp), ..self @@ -331,14 +324,7 @@ pub struct ReplyBuilder<'a> { } impl TimestampBuilderTrait for ReplyBuilder<'_> { - fn with_timestamp_opt(self, timestamp: Option) -> Self { - Self { - sample_builder: self.sample_builder.with_timestamp_opt(timestamp), - ..self - } - } - - fn with_timestamp(self, timestamp: Timestamp) -> Self { + fn with_timestamp(self, timestamp: Option) -> Self { Self { sample_builder: self.sample_builder.with_timestamp(timestamp), ..self @@ -423,14 +409,7 @@ pub struct ReplyDelBuilder<'a> { } impl TimestampBuilderTrait for ReplyDelBuilder<'_> { - fn with_timestamp_opt(self, timestamp: Option) -> Self { - Self { - sample_builder: self.sample_builder.with_timestamp_opt(timestamp), - ..self - } - } - - fn with_timestamp(self, timestamp: Timestamp) -> Self { + fn with_timestamp(self, timestamp: Option) -> Self { Self { sample_builder: self.sample_builder.with_timestamp(timestamp), ..self diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index b13bfce346..990586ca0f 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -42,9 +42,7 @@ pub trait QoSBuilderTrait { pub trait TimestampBuilderTrait { /// Sets of clears timestamp - fn with_timestamp_opt(self, timestamp: Option) -> Self; - /// Sets timestamp - fn with_timestamp(self, timestamp: Timestamp) -> Self; + fn with_timestamp(self, timestamp: Option) -> Self; } pub trait SampleBuilderTrait { @@ -102,16 +100,12 @@ impl SampleBuilder { } impl TimestampBuilderTrait for SampleBuilder { - fn with_timestamp_opt(self, timestamp: Option) -> Self { + fn with_timestamp(self, timestamp: Option) -> Self { Self(Sample { timestamp, ..self.0 }) } - - fn with_timestamp(self, timestamp: Timestamp) -> Self { - self.with_timestamp_opt(Some(timestamp)) - } } impl SampleBuilderTrait for SampleBuilder { @@ -200,12 +194,9 @@ impl PutSampleBuilder { } impl TimestampBuilderTrait for PutSampleBuilder { - fn with_timestamp(self, timestamp: Timestamp) -> Self { + fn with_timestamp(self, timestamp: Option) -> Self { Self(self.0.with_timestamp(timestamp)) } - fn with_timestamp_opt(self, timestamp: Option) -> Self { - Self(self.0.with_timestamp_opt(timestamp)) - } } impl SampleBuilderTrait for PutSampleBuilder { @@ -297,12 +288,9 @@ impl DeleteSampleBuilder { } impl TimestampBuilderTrait for DeleteSampleBuilder { - fn with_timestamp(self, timestamp: Timestamp) -> Self { + fn with_timestamp(self, timestamp: Option) -> Self { Self(self.0.with_timestamp(timestamp)) } - fn with_timestamp_opt(self, timestamp: Option) -> Self { - Self(self.0.with_timestamp_opt(timestamp)) - } } impl SampleBuilderTrait for DeleteSampleBuilder { From aafd2a4761b8b4df5089d19ef74f71bfe28aa644 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 13:10:32 +0100 Subject: [PATCH 063/357] with removed, into> added --- examples/examples/z_pub.rs | 2 +- .../src/replica/align_queryable.rs | 2 +- .../src/replica/aligner.rs | 2 +- .../src/replica/storage.rs | 10 +-- 
zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh/src/publication.rs | 70 +++++++++---------- zenoh/src/query.rs | 11 +-- zenoh/src/queryable.rs | 60 +++++----------- zenoh/src/sample.rs | 11 +++ zenoh/src/sample_builder.rs | 58 ++++++--------- zenoh/tests/attachments.rs | 10 +-- 11 files changed, 99 insertions(+), 139 deletions(-) diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index d22d4d55ee..416ff31f46 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -39,7 +39,7 @@ async fn main() { println!("Putting Data ('{}': '{}')...", &key_expr, buf); let mut put = publisher.put(buf); if let Some(attachment) = &attachment { - put = put.with_attachment( + put = put.attachment( attachment .split('&') .map(|pair| split_once(pair, '=')) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index e5c4840666..973fb89abe 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -130,7 +130,7 @@ impl AlignQueryable { query .reply(k, v.payload) .with_encoding(v.encoding) - .with_timestamp(ts) + .timestamp(ts) .res() .await .unwrap(); diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 4119a941e5..9d5257e53f 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -112,7 +112,7 @@ impl Aligner { } = value; let sample = PutSampleBuilder::new(key, payload) .with_encoding(encoding) - .with_timestamp(ts) + .timestamp(ts) .res_sync(); log::debug!("[ALIGNER] Adding {:?} to storage", sample); self.tx_sample.send_async(sample).await.unwrap_or_else(|e| { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 69c973de39..de76ade51d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -239,7 +239,7 @@ impl StorageService { } }; let timestamp = sample.timestamp().cloned().unwrap_or(new_reception_timestamp()); - let sample = SampleBuilder::from(sample).with_timestamp(timestamp).res_sync(); + let sample = SampleBuilder::from(sample).timestamp(timestamp).res_sync(); self.process_sample(sample).await; }, // on query on key_expr @@ -316,14 +316,14 @@ impl StorageService { } = data.value; PutSampleBuilder::new(KeyExpr::from(k.clone()), payload) .with_encoding(encoding) - .with_timestamp(data.timestamp) + .timestamp(data.timestamp) .res_sync() } Some(Update { kind: SampleKind::Delete, data, }) => DeleteSampleBuilder::new(KeyExpr::from(k.clone())) - .with_timestamp(data.timestamp) + .timestamp(data.timestamp) .res_sync(), None => SampleBuilder::from(sample.clone()) .keyexpr(k.clone()) @@ -533,7 +533,7 @@ impl StorageService { if let Err(e) = q .reply(key.clone(), payload) .with_encoding(encoding) - .with_timestamp(entry.timestamp) + .timestamp(entry.timestamp) .res_async() .await { @@ -568,7 +568,7 @@ impl StorageService { if let Err(e) = q .reply(q.key_expr().clone(), payload) .with_encoding(encoding) - .with_timestamp(entry.timestamp) + .timestamp(entry.timestamp) .res_async() .await { diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index e6b269cfbd..52a4263396 100644 --- 
a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -665,7 +665,7 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { // ensure the sample has a timestamp, thus it will always be sorted into the MergeQueue // after any timestamped Sample possibly coming from a fetch reply. let timestamp = s.timestamp().cloned().unwrap_or(new_reception_timestamp()); - let s = SampleBuilder::from(s).with_timestamp(timestamp).res_sync(); + let s = SampleBuilder::from(s).timestamp(timestamp).res_sync(); state.merge_queue.push(s); } } diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index f8a42077b9..cd68530bf7 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -147,54 +147,52 @@ impl QoSBuilderTrait for DeleteBuilder<'_, '_> { } impl TimestampBuilderTrait for PutBuilder<'_, '_> { - fn with_timestamp(self, timestamp: Option) -> Self { - Self { timestamp, ..self } + fn timestamp>>(self, timestamp: T) -> Self { + Self { + timestamp: timestamp.into(), + ..self + } } } impl SampleBuilderTrait for PutBuilder<'_, '_> { #[cfg(feature = "unstable")] - fn with_source_info(self, source_info: SourceInfo) -> Self { + fn source_info(self, source_info: SourceInfo) -> Self { Self { source_info, ..self } } #[cfg(feature = "unstable")] - fn with_attachment_opt(self, attachment: Option) -> Self { - Self { attachment, ..self } - } - #[cfg(feature = "unstable")] - fn with_attachment(self, attachment: Attachment) -> Self { + fn attachment>>(self, attachment: T) -> Self { Self { - attachment: Some(attachment), + attachment: attachment.into(), ..self } } } impl TimestampBuilderTrait for DeleteBuilder<'_, '_> { - fn with_timestamp(self, timestamp: Option) -> Self { - Self { timestamp, ..self } + fn timestamp>>(self, timestamp: T) -> Self { + Self { + timestamp: timestamp.into(), + ..self + } } } impl SampleBuilderTrait for DeleteBuilder<'_, '_> { #[cfg(feature = "unstable")] - fn with_source_info(self, source_info: SourceInfo) -> Self { + fn source_info(self, source_info: SourceInfo) -> Self { Self { source_info, ..self } } #[cfg(feature = "unstable")] - fn with_attachment_opt(self, attachment: Option) -> Self { - Self { attachment, ..self } - } - #[cfg(feature = "unstable")] - fn with_attachment(self, attachment: Attachment) -> Self { + fn attachment>>(self, attachment: T) -> Self { Self { - attachment: Some(attachment), + attachment: attachment.into(), ..self } } @@ -754,14 +752,17 @@ pub struct DeletePublication<'a> { } impl TimestampBuilderTrait for PutPublication<'_> { - fn with_timestamp(self, timestamp: Option) -> Self { - Self { timestamp, ..self } + fn timestamp>>(self, timestamp: T) -> Self { + Self { + timestamp: timestamp.into(), + ..self + } } } impl SampleBuilderTrait for PutPublication<'_> { #[cfg(feature = "unstable")] - fn with_source_info(self, source_info: SourceInfo) -> Self { + fn source_info(self, source_info: SourceInfo) -> Self { Self { source_info, ..self @@ -769,14 +770,9 @@ impl SampleBuilderTrait for PutPublication<'_> { } #[cfg(feature = "unstable")] - fn with_attachment_opt(self, attachment: Option) -> Self { - Self { attachment, ..self } - } - - #[cfg(feature = "unstable")] - fn with_attachment(self, attachment: Attachment) -> Self { + fn attachment>>(self, attachment: T) -> Self { Self { - attachment: Some(attachment), + attachment: attachment.into(), ..self } } @@ -799,14 +795,17 @@ impl ValueBuilderTrait for PutPublication<'_> { } impl TimestampBuilderTrait for DeletePublication<'_> { - fn with_timestamp(self, timestamp: 
Option) -> Self { - Self { timestamp, ..self } + fn timestamp>>(self, timestamp: T) -> Self { + Self { + timestamp: timestamp.into(), + ..self + } } } impl SampleBuilderTrait for DeletePublication<'_> { #[cfg(feature = "unstable")] - fn with_source_info(self, source_info: SourceInfo) -> Self { + fn source_info(self, source_info: SourceInfo) -> Self { Self { source_info, ..self @@ -814,14 +813,9 @@ impl SampleBuilderTrait for DeletePublication<'_> { } #[cfg(feature = "unstable")] - fn with_attachment_opt(self, attachment: Option) -> Self { - Self { attachment, ..self } - } - - #[cfg(feature = "unstable")] - fn with_attachment(self, attachment: Attachment) -> Self { + fn attachment>>(self, attachment: T) -> Self { Self { - attachment: Some(attachment), + attachment: attachment.into(), ..self } } diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index db17715a89..2d4e5e1ee3 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -135,7 +135,7 @@ pub struct GetBuilder<'a, 'b, Handler> { impl SampleBuilderTrait for GetBuilder<'_, '_, Handler> { #[cfg(feature = "unstable")] - fn with_source_info(self, source_info: SourceInfo) -> Self { + fn source_info(self, source_info: SourceInfo) -> Self { Self { source_info, ..self @@ -143,14 +143,9 @@ impl SampleBuilderTrait for GetBuilder<'_, '_, Handler> { } #[cfg(feature = "unstable")] - fn with_attachment_opt(self, attachment: Option) -> Self { - Self { attachment, ..self } - } - - #[cfg(feature = "unstable")] - fn with_attachment(self, attachment: Attachment) -> Self { + fn attachment>>(self, attachment: T) -> Self { Self { - attachment: Some(attachment), + attachment: attachment.into(), ..self } } diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 625ae6f25f..66cb34459b 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -239,9 +239,9 @@ impl<'a> ReplySampleBuilder<'a> { } impl TimestampBuilderTrait for ReplySampleBuilder<'_> { - fn with_timestamp(self, timestamp: Option) -> Self { + fn timestamp>>(self, timestamp: T) -> Self { Self { - sample_builder: self.sample_builder.with_timestamp(timestamp), + sample_builder: self.sample_builder.timestamp(timestamp), ..self } } @@ -249,25 +249,17 @@ impl TimestampBuilderTrait for ReplySampleBuilder<'_> { impl SampleBuilderTrait for ReplySampleBuilder<'_> { #[cfg(feature = "unstable")] - fn with_source_info(self, source_info: SourceInfo) -> Self { + fn source_info(self, source_info: SourceInfo) -> Self { Self { - sample_builder: self.sample_builder.with_source_info(source_info), + sample_builder: self.sample_builder.source_info(source_info), ..self } } #[cfg(feature = "unstable")] - fn with_attachment_opt(self, attachment: Option) -> Self { + fn attachment>>(self, attachment: T) -> Self { Self { - sample_builder: self.sample_builder.with_attachment_opt(attachment), - ..self - } - } - - #[cfg(feature = "unstable")] - fn with_attachment(self, attachment: Attachment) -> Self { - Self { - sample_builder: self.sample_builder.with_attachment(attachment), + sample_builder: self.sample_builder.attachment(attachment), ..self } } @@ -324,9 +316,9 @@ pub struct ReplyBuilder<'a> { } impl TimestampBuilderTrait for ReplyBuilder<'_> { - fn with_timestamp(self, timestamp: Option) -> Self { + fn timestamp>>(self, timestamp: T) -> Self { Self { - sample_builder: self.sample_builder.with_timestamp(timestamp), + sample_builder: self.sample_builder.timestamp(timestamp), ..self } } @@ -334,25 +326,17 @@ impl TimestampBuilderTrait for ReplyBuilder<'_> { impl SampleBuilderTrait for 
ReplyBuilder<'_> { #[cfg(feature = "unstable")] - fn with_source_info(self, source_info: SourceInfo) -> Self { - Self { - sample_builder: self.sample_builder.with_source_info(source_info), - ..self - } - } - - #[cfg(feature = "unstable")] - fn with_attachment_opt(self, attachment: Option) -> Self { + fn source_info(self, source_info: SourceInfo) -> Self { Self { - sample_builder: self.sample_builder.with_attachment_opt(attachment), + sample_builder: self.sample_builder.source_info(source_info), ..self } } #[cfg(feature = "unstable")] - fn with_attachment(self, attachment: Attachment) -> Self { + fn attachment>>(self, attachment: T) -> Self { Self { - sample_builder: self.sample_builder.with_attachment(attachment), + sample_builder: self.sample_builder.attachment(attachment), ..self } } @@ -409,9 +393,9 @@ pub struct ReplyDelBuilder<'a> { } impl TimestampBuilderTrait for ReplyDelBuilder<'_> { - fn with_timestamp(self, timestamp: Option) -> Self { + fn timestamp>>(self, timestamp: T) -> Self { Self { - sample_builder: self.sample_builder.with_timestamp(timestamp), + sample_builder: self.sample_builder.timestamp(timestamp), ..self } } @@ -419,25 +403,17 @@ impl TimestampBuilderTrait for ReplyDelBuilder<'_> { impl SampleBuilderTrait for ReplyDelBuilder<'_> { #[cfg(feature = "unstable")] - fn with_source_info(self, source_info: SourceInfo) -> Self { - Self { - sample_builder: self.sample_builder.with_source_info(source_info), - ..self - } - } - - #[cfg(feature = "unstable")] - fn with_attachment_opt(self, attachment: Option) -> Self { + fn source_info(self, source_info: SourceInfo) -> Self { Self { - sample_builder: self.sample_builder.with_attachment_opt(attachment), + sample_builder: self.sample_builder.source_info(source_info), ..self } } #[cfg(feature = "unstable")] - fn with_attachment(self, attachment: Attachment) -> Self { + fn attachment>>(self, attachment: T) -> Self { Self { - sample_builder: self.sample_builder.with_attachment(attachment), + sample_builder: self.sample_builder.attachment(attachment), ..self } } diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index d774e5e007..163ae2090a 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -263,6 +263,17 @@ mod attachment { } } } + #[zenoh_macros::unstable] + impl From for Option { + fn from(value: AttachmentBuilder) -> Self { + if value.inner.is_empty() { + None + } else { + Some(value.into()) + } + } + } + #[zenoh_macros::unstable] #[derive(Clone)] pub struct Attachment { diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index 990586ca0f..2d7277506d 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -41,20 +41,17 @@ pub trait QoSBuilderTrait { } pub trait TimestampBuilderTrait { - /// Sets of clears timestamp - fn with_timestamp(self, timestamp: Option) -> Self; + /// Sets of clears timestamp + fn timestamp>>(self, timestamp: T) -> Self; } pub trait SampleBuilderTrait { /// Attach source information #[zenoh_macros::unstable] - fn with_source_info(self, source_info: SourceInfo) -> Self; - /// Attach or remove user-provided data in key-value format - #[zenoh_macros::unstable] - fn with_attachment_opt(self, attachment: Option) -> Self; + fn source_info(self, source_info: SourceInfo) -> Self; /// Attach user-provided data in key-value format #[zenoh_macros::unstable] - fn with_attachment(self, attachment: Attachment) -> Self; + fn attachment>>(self, attachment: T) -> Self; } pub trait ValueBuilderTrait { @@ -100,9 +97,9 @@ impl SampleBuilder { } impl TimestampBuilderTrait 
for SampleBuilder { - fn with_timestamp(self, timestamp: Option) -> Self { + fn timestamp>>(self, timestamp: T) -> Self { Self(Sample { - timestamp, + timestamp: timestamp.into(), ..self.0 }) } @@ -110,7 +107,7 @@ impl TimestampBuilderTrait for SampleBuilder { impl SampleBuilderTrait for SampleBuilder { #[zenoh_macros::unstable] - fn with_source_info(self, source_info: SourceInfo) -> Self { + fn source_info(self, source_info: SourceInfo) -> Self { Self(Sample { source_info, ..self.0 @@ -118,17 +115,12 @@ impl SampleBuilderTrait for SampleBuilder { } #[zenoh_macros::unstable] - fn with_attachment_opt(self, attachment: Option) -> Self { + fn attachment>>(self, attachment: T) -> Self { Self(Sample { - attachment, + attachment: attachment.into(), ..self.0 }) } - - #[zenoh_macros::unstable] - fn with_attachment(self, attachment: Attachment) -> Self { - self.with_attachment_opt(Some(attachment)) - } } impl QoSBuilderTrait for SampleBuilder { @@ -194,23 +186,19 @@ impl PutSampleBuilder { } impl TimestampBuilderTrait for PutSampleBuilder { - fn with_timestamp(self, timestamp: Option) -> Self { - Self(self.0.with_timestamp(timestamp)) + fn timestamp>>(self, timestamp: T) -> Self { + Self(self.0.timestamp(timestamp)) } } impl SampleBuilderTrait for PutSampleBuilder { #[zenoh_macros::unstable] - fn with_source_info(self, source_info: SourceInfo) -> Self { - Self(self.0.with_source_info(source_info)) + fn source_info(self, source_info: SourceInfo) -> Self { + Self(self.0.source_info(source_info)) } #[zenoh_macros::unstable] - fn with_attachment(self, attachment: Attachment) -> Self { - Self(self.0.with_attachment(attachment)) - } - #[zenoh_macros::unstable] - fn with_attachment_opt(self, attachment: Option) -> Self { - Self(self.0.with_attachment_opt(attachment)) + fn attachment>>(self, attachment: T) -> Self { + Self(self.0.attachment(attachment)) } } @@ -288,23 +276,19 @@ impl DeleteSampleBuilder { } impl TimestampBuilderTrait for DeleteSampleBuilder { - fn with_timestamp(self, timestamp: Option) -> Self { - Self(self.0.with_timestamp(timestamp)) + fn timestamp>>(self, timestamp: T) -> Self { + Self(self.0.timestamp(timestamp)) } } impl SampleBuilderTrait for DeleteSampleBuilder { #[zenoh_macros::unstable] - fn with_source_info(self, source_info: SourceInfo) -> Self { - Self(self.0.with_source_info(source_info)) - } - #[zenoh_macros::unstable] - fn with_attachment(self, attachment: Attachment) -> Self { - Self(self.0.with_attachment(attachment)) + fn source_info(self, source_info: SourceInfo) -> Self { + Self(self.0.source_info(source_info)) } #[zenoh_macros::unstable] - fn with_attachment_opt(self, attachment: Option) -> Self { - Self(self.0.with_attachment_opt(attachment)) + fn attachment>>(self, attachment: T) -> Self { + Self(self.0.attachment(attachment)) } } diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index ba4c8a7d7c..e87fc5243b 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -38,22 +38,22 @@ fn pubsub() { } zenoh .put("test/attachment", "put") - .with_attachment( + .attachment(Some( backer .iter() .map(|b| (b.0.as_slice(), b.1.as_slice())) .collect(), - ) + )) .res() .unwrap(); publisher .put("publisher") - .with_attachment( + .attachment(Some( backer .iter() .map(|b| (b.0.as_slice(), b.1.as_slice())) .collect(), - ) + )) .res() .unwrap(); } @@ -84,7 +84,7 @@ fn queries() { query.key_expr().clone(), query.value().unwrap().payload.clone(), ) - .with_attachment(attachment) + .attachment(attachment) .res() .unwrap(); }) From 
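
The net effect of this patch is that the setters become generic over `T: Into<Option<Timestamp>>` / `T: Into<Option<Attachment>>`, so call sites can pass either a bare value or an explicit `Some`/`None`. A rough sketch (hypothetical `ts` and `att` values; `attachment` requires the `unstable` feature):

    session
        .put("key/expression", "payload")
        .timestamp(ts)          // a bare Timestamp converts into Some(ts); None clears it
        .attachment(Some(att))  // Option<Attachment> is accepted directly
        .res()
        .await
        .unwrap();
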
fb6509df61afccf4cd983e460553e9f07ce77d25 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 13:21:24 +0100 Subject: [PATCH 064/357] into to encoding returned --- .../src/replica/storage.rs | 2 +- zenoh/src/net/runtime/adminspace.rs | 2 +- zenoh/src/publication.rs | 14 ++++++++++---- zenoh/src/query.rs | 7 ++----- zenoh/src/queryable.rs | 7 ++----- zenoh/src/sample_builder.rs | 17 ++++++----------- zenoh/src/value.rs | 12 ++++++------ 7 files changed, 28 insertions(+), 33 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index de76ade51d..6d31c9710a 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -719,7 +719,7 @@ fn construct_update(data: String) -> Update { for slice in result.3 { payload.push_zslice(slice.to_vec().into()); } - let value = Value::new(payload).with_encoding(result.2.into()); + let value = Value::new(payload).with_encoding(result.2); let data = StoredData { value, timestamp: Timestamp::from_str(&result.1).unwrap(), // @TODO: remove the unwrap() diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 9047e8b112..caeeb5c89b 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -426,7 +426,7 @@ impl Primitives for AdminSpace { parameters, value: query .ext_body - .map(|b| Value::from(b.payload).with_encoding(b.encoding.into())), + .map(|b| Value::from(b.payload).with_encoding(b.encoding)), qid: msg.id, zid, primitives, diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index cd68530bf7..0e93350222 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -199,8 +199,11 @@ impl SampleBuilderTrait for DeleteBuilder<'_, '_> { } impl ValueBuilderTrait for PutBuilder<'_, '_> { - fn with_encoding(self, encoding: Encoding) -> Self { - Self { encoding, ..self } + fn with_encoding>(self, encoding: T) -> Self { + Self { + encoding: encoding.into(), + ..self + } } fn with_payload(self, payload: IntoPayload) -> Self @@ -779,8 +782,11 @@ impl SampleBuilderTrait for PutPublication<'_> { } impl ValueBuilderTrait for PutPublication<'_> { - fn with_encoding(self, encoding: Encoding) -> Self { - Self { encoding, ..self } + fn with_encoding>(self, encoding: T) -> Self { + Self { + encoding: encoding.into(), + ..self + } } fn with_payload(self, payload: IntoPayload) -> Self diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 2d4e5e1ee3..05f9a3557f 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -169,15 +169,12 @@ impl QoSBuilderTrait for GetBuilder<'_, '_, DefaultHandler> { } impl ValueBuilderTrait for GetBuilder<'_, '_, Handler> { - fn with_encoding(self, encoding: Encoding) -> Self { + fn with_encoding>(self, encoding: T) -> Self { let value = Some(self.value.unwrap_or_default().with_encoding(encoding)); Self { value, ..self } } - fn with_payload(self, payload: IntoPayload) -> Self - where - IntoPayload: Into, - { + fn with_payload>(self, payload: T) -> Self { let value = Some(self.value.unwrap_or_default().with_payload(payload)); Self { value, ..self } } diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 66cb34459b..4f478e1ce7 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -366,17 +366,14 @@ impl QoSBuilderTrait for ReplyBuilder<'_> { } impl ValueBuilderTrait for ReplyBuilder<'_> { - fn with_encoding(self, encoding: Encoding) -> 
Self { + fn with_encoding>(self, encoding: T) -> Self { Self { sample_builder: self.sample_builder.with_encoding(encoding), ..self } } - fn with_payload(self, payload: IntoPayload) -> Self - where - IntoPayload: Into, - { + fn with_payload>(self, payload: T) -> Self { Self { sample_builder: self.sample_builder.with_payload(payload), ..self diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index 2d7277506d..a113a9c953 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -41,7 +41,7 @@ pub trait QoSBuilderTrait { } pub trait TimestampBuilderTrait { - /// Sets of clears timestamp + /// Sets of clears timestamp fn timestamp>>(self, timestamp: T) -> Self; } @@ -56,11 +56,9 @@ pub trait SampleBuilderTrait { pub trait ValueBuilderTrait { /// Set the [`Encoding`] - fn with_encoding(self, encoding: Encoding) -> Self; + fn with_encoding>(self, encoding: T) -> Self; /// Sets the payload - fn with_payload(self, payload: IntoPayload) -> Self - where - IntoPayload: Into; + fn with_payload>(self, payload: T) -> Self; } #[derive(Debug)] @@ -215,16 +213,13 @@ impl QoSBuilderTrait for PutSampleBuilder { } impl ValueBuilderTrait for PutSampleBuilder { - fn with_encoding(self, encoding: Encoding) -> Self { + fn with_encoding>(self, encoding: T) -> Self { Self(SampleBuilder(Sample { - encoding, + encoding: encoding.into(), ..self.0 .0 })) } - fn with_payload(self, payload: IntoPayload) -> Self - where - IntoPayload: Into, - { + fn with_payload>(self, payload: T) -> Self { Self(SampleBuilder(Sample { payload: payload.into(), ..self.0 .0 diff --git a/zenoh/src/value.rs b/zenoh/src/value.rs index 2d98cbf398..2e288c64ad 100644 --- a/zenoh/src/value.rs +++ b/zenoh/src/value.rs @@ -46,13 +46,13 @@ impl Value { } impl ValueBuilderTrait for Value { - fn with_encoding(self, encoding: Encoding) -> Self { - Self { encoding, ..self } + fn with_encoding>(self, encoding: T) -> Self { + Self { + encoding: encoding.into(), + ..self + } } - fn with_payload(self, payload: IntoPayload) -> Self - where - IntoPayload: Into, - { + fn with_payload>(self, payload: T) -> Self { Self { payload: payload.into(), ..self From 2ff6bc22f79f5ab373e1073ae5be1744b646ab49 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 13:23:17 +0100 Subject: [PATCH 065/357] example build fix --- examples/examples/z_pub.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 416ff31f46..7166981e72 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -39,12 +39,12 @@ async fn main() { println!("Putting Data ('{}': '{}')...", &key_expr, buf); let mut put = publisher.put(buf); if let Some(attachment) = &attachment { - put = put.attachment( + put = put.attachment(Some( attachment .split('&') .map(|pair| split_once(pair, '=')) .collect(), - ) + )) } put.res().await.unwrap(); } From 5bbef9c7d4643259a23cde58a20ea08f4a8a464f Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 13:24:50 +0100 Subject: [PATCH 066/357] with removed --- plugins/zenoh-plugin-rest/src/lib.rs | 10 ++-------- .../src/replica/align_queryable.rs | 2 +- .../src/replica/aligner.rs | 2 +- .../src/replica/storage.rs | 10 +++++----- zenoh/src/net/runtime/adminspace.rs | 4 ++-- zenoh/src/publication.rs | 8 ++++---- zenoh/src/query.rs | 8 ++++---- zenoh/src/queryable.rs | 10 +++++----- zenoh/src/sample.rs | 2 +- zenoh/src/sample_builder.rs | 8 ++++---- zenoh/src/value.rs | 4 ++-- 11 files changed, 31 insertions(+), 37 
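
With the `Into<Encoding>` bound, call sites no longer need an explicit conversion before handing an encoding to the builder. A small sketch based on the call sites above (hypothetical `payload` and `encoding` values):

    let value = Value::new(payload).with_encoding(encoding); // `.into()` no longer needed at the call site
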
deletions(-) diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 74da23679f..c90bbe5ac1 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -420,7 +420,7 @@ async fn query(mut req: Request<(Arc, String)>) -> tide::Result { @@ -464,13 +464,7 @@ async fn write(mut req: Request<(Arc, String)>) -> tide::Result { - session - .put(&key_expr, bytes) - .with_encoding(encoding) - .res() - .await - } + SampleKind::Put => session.put(&key_expr, bytes).encoding(encoding).res().await, SampleKind::Delete => session.delete(&key_expr).res().await, }; match res { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 973fb89abe..b2d2bdc399 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -129,7 +129,7 @@ impl AlignQueryable { AlignData::Data(k, (v, ts)) => { query .reply(k, v.payload) - .with_encoding(v.encoding) + .encoding(v.encoding) .timestamp(ts) .res() .await diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 9d5257e53f..6527d54c66 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -111,7 +111,7 @@ impl Aligner { payload, encoding, .. } = value; let sample = PutSampleBuilder::new(key, payload) - .with_encoding(encoding) + .encoding(encoding) .timestamp(ts) .res_sync(); log::debug!("[ALIGNER] Adding {:?} to storage", sample); diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 6d31c9710a..8e60ee320e 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -315,7 +315,7 @@ impl StorageService { payload, encoding, .. 
} = data.value; PutSampleBuilder::new(KeyExpr::from(k.clone()), payload) - .with_encoding(encoding) + .encoding(encoding) .timestamp(data.timestamp) .res_sync() } @@ -344,7 +344,7 @@ impl StorageService { .put( stripped_key, Value::new(sample_to_store.payload().clone()) - .with_encoding(sample_to_store.encoding().clone()), + .encoding(sample_to_store.encoding().clone()), *sample_to_store.timestamp().unwrap(), ) .await @@ -532,7 +532,7 @@ impl StorageService { } = entry.value; if let Err(e) = q .reply(key.clone(), payload) - .with_encoding(encoding) + .encoding(encoding) .timestamp(entry.timestamp) .res_async() .await @@ -567,7 +567,7 @@ impl StorageService { } = entry.value; if let Err(e) = q .reply(q.key_expr().clone(), payload) - .with_encoding(encoding) + .encoding(encoding) .timestamp(entry.timestamp) .res_async() .await @@ -719,7 +719,7 @@ fn construct_update(data: String) -> Update { for slice in result.3 { payload.push_zslice(slice.to_vec().into()); } - let value = Value::new(payload).with_encoding(result.2); + let value = Value::new(payload).encoding(result.2); let data = StoredData { value, timestamp: Timestamp::from_str(&result.1).unwrap(), // @TODO: remove the unwrap() diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index caeeb5c89b..070b3bcd3a 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -426,7 +426,7 @@ impl Primitives for AdminSpace { parameters, value: query .ext_body - .map(|b| Value::from(b.payload).with_encoding(b.encoding)), + .map(|b| Value::from(b.payload).encoding(b.encoding)), qid: msg.id, zid, primitives, @@ -578,7 +578,7 @@ fn router_data(context: &AdminContext, query: Query) { }; if let Err(e) = query .reply(reply_key, payload) - .with_encoding(Encoding::APPLICATION_JSON) + .encoding(Encoding::APPLICATION_JSON) .res_sync() { log::error!("Error sending AdminSpace reply: {:?}", e); diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 0e93350222..8f52d5e4fa 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -199,14 +199,14 @@ impl SampleBuilderTrait for DeleteBuilder<'_, '_> { } impl ValueBuilderTrait for PutBuilder<'_, '_> { - fn with_encoding>(self, encoding: T) -> Self { + fn encoding>(self, encoding: T) -> Self { Self { encoding: encoding.into(), ..self } } - fn with_payload(self, payload: IntoPayload) -> Self + fn payload(self, payload: IntoPayload) -> Self where IntoPayload: Into, { @@ -782,14 +782,14 @@ impl SampleBuilderTrait for PutPublication<'_> { } impl ValueBuilderTrait for PutPublication<'_> { - fn with_encoding>(self, encoding: T) -> Self { + fn encoding>(self, encoding: T) -> Self { Self { encoding: encoding.into(), ..self } } - fn with_payload(self, payload: IntoPayload) -> Self + fn payload(self, payload: IntoPayload) -> Self where IntoPayload: Into, { diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 05f9a3557f..837ed69f22 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -169,13 +169,13 @@ impl QoSBuilderTrait for GetBuilder<'_, '_, DefaultHandler> { } impl ValueBuilderTrait for GetBuilder<'_, '_, Handler> { - fn with_encoding>(self, encoding: T) -> Self { - let value = Some(self.value.unwrap_or_default().with_encoding(encoding)); + fn encoding>(self, encoding: T) -> Self { + let value = Some(self.value.unwrap_or_default().encoding(encoding)); Self { value, ..self } } - fn with_payload>(self, payload: T) -> Self { - let value = Some(self.value.unwrap_or_default().with_payload(payload)); + fn 
payload>(self, payload: T) -> Self { + let value = Some(self.value.unwrap_or_default().payload(payload)); Self { value, ..self } } } diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 4f478e1ce7..37f914d0e0 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -228,7 +228,7 @@ impl<'a> ReplySampleBuilder<'a> { query: self.query, sample_builder: self.sample_builder.into(), }; - builder.with_payload(payload) + builder.payload(payload) } pub fn delete(self) -> ReplyDelBuilder<'a> { ReplyDelBuilder { @@ -366,16 +366,16 @@ impl QoSBuilderTrait for ReplyBuilder<'_> { } impl ValueBuilderTrait for ReplyBuilder<'_> { - fn with_encoding>(self, encoding: T) -> Self { + fn encoding>(self, encoding: T) -> Self { Self { - sample_builder: self.sample_builder.with_encoding(encoding), + sample_builder: self.sample_builder.encoding(encoding), ..self } } - fn with_payload>(self, payload: T) -> Self { + fn payload>(self, payload: T) -> Self { Self { - sample_builder: self.sample_builder.with_payload(payload), + sample_builder: self.sample_builder.payload(payload), ..self } } diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 163ae2090a..813bc1c63e 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -570,7 +570,7 @@ impl Sample { impl From for Value { fn from(sample: Sample) -> Self { - Value::new(sample.payload).with_encoding(sample.encoding) + Value::new(sample.payload).encoding(sample.encoding) } } diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index a113a9c953..0996f17cf9 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -56,9 +56,9 @@ pub trait SampleBuilderTrait { pub trait ValueBuilderTrait { /// Set the [`Encoding`] - fn with_encoding>(self, encoding: T) -> Self; + fn encoding>(self, encoding: T) -> Self; /// Sets the payload - fn with_payload>(self, payload: T) -> Self; + fn payload>(self, payload: T) -> Self; } #[derive(Debug)] @@ -213,13 +213,13 @@ impl QoSBuilderTrait for PutSampleBuilder { } impl ValueBuilderTrait for PutSampleBuilder { - fn with_encoding>(self, encoding: T) -> Self { + fn encoding>(self, encoding: T) -> Self { Self(SampleBuilder(Sample { encoding: encoding.into(), ..self.0 .0 })) } - fn with_payload>(self, payload: T) -> Self { + fn payload>(self, payload: T) -> Self { Self(SampleBuilder(Sample { payload: payload.into(), ..self.0 .0 diff --git a/zenoh/src/value.rs b/zenoh/src/value.rs index 2e288c64ad..6d4de1366c 100644 --- a/zenoh/src/value.rs +++ b/zenoh/src/value.rs @@ -46,13 +46,13 @@ impl Value { } impl ValueBuilderTrait for Value { - fn with_encoding>(self, encoding: T) -> Self { + fn encoding>(self, encoding: T) -> Self { Self { encoding: encoding.into(), ..self } } - fn with_payload>(self, payload: T) -> Self { + fn payload>(self, payload: T) -> Self { Self { payload: payload.into(), ..self From 9809799b36a6210a9e1f2bbb5e5314540ddb0589 Mon Sep 17 00:00:00 2001 From: Alexander Date: Thu, 28 Mar 2024 13:25:01 +0100 Subject: [PATCH 067/357] Add protocol version to error message (#871) --- io/zenoh-transport/src/unicast/establishment/accept.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index 72e676f6ec..7648f16e7d 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs +++ b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -167,9 +167,11 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { // Check if the 
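
After this rename the value-related setters read as plain nouns and chain directly on replies and puts. An illustrative reply, not taken verbatim from the patch, assuming a received `query` plus `reply_key` and `payload` values in scope:

    query
        .reply(reply_key, payload)
        .encoding(Encoding::APPLICATION_JSON) // formerly `.with_encoding(...)`
        .res()
        .await
        .unwrap();
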
version is supported if init_syn.version != input.mine_version { let e = zerror!( - "Rejecting InitSyn on {} because of unsupported Zenoh version from peer: {}", + "Rejecting InitSyn on {} because of unsupported Zenoh protocol version (expected: {}, received: {}) from: {}", self.link, - init_syn.zid + input.mine_version, + init_syn.version, + init_syn.zid, ); return Err((e.into(), Some(close::reason::INVALID))); } From c427ac732861fd775f1b275ca7948719f16fbad5 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 13:42:52 +0100 Subject: [PATCH 068/357] resolvable removed from simple builders --- .../src/replica/aligner.rs | 5 +- .../src/replica/storage.rs | 29 ++++----- zenoh-ext/src/querying_subscriber.rs | 5 +- zenoh/src/queryable.rs | 9 +-- zenoh/src/sample.rs | 18 ------ zenoh/src/sample_builder.rs | 60 ++++--------------- 6 files changed, 32 insertions(+), 94 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 6527d54c66..3a6cc0444d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -24,7 +24,6 @@ use zenoh::prelude::r#async::*; use zenoh::sample_builder::{PutSampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}; use zenoh::time::Timestamp; use zenoh::Session; -use zenoh_core::{AsyncResolve, SyncResolve}; pub struct Aligner { session: Arc, @@ -113,7 +112,7 @@ impl Aligner { let sample = PutSampleBuilder::new(key, payload) .encoding(encoding) .timestamp(ts) - .res_sync(); + .into(); log::debug!("[ALIGNER] Adding {:?} to storage", sample); self.tx_sample.send_async(sample).await.unwrap_or_else(|e| { log::error!("[ALIGNER] Error adding sample to storage: {}", e) @@ -331,7 +330,7 @@ impl Aligner { .get(&selector) .consolidation(zenoh::query::ConsolidationMode::None) .accept_replies(zenoh::query::ReplyKeyExpr::Any) - .res_async() + .res() .await { Ok(replies) => { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 8e60ee320e..9e9f8914d0 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -24,6 +24,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::buffer::SplitBuffer; use zenoh::buffers::ZBuf; use zenoh::key_expr::KeyExpr; +use zenoh::prelude::r#async::*; use zenoh::query::{ConsolidationMode, QueryTarget}; use zenoh::sample::{Sample, SampleKind}; use zenoh::sample_builder::{ @@ -34,7 +35,6 @@ use zenoh::value::Value; use zenoh::{Result as ZResult, Session, SessionDeclarations}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; -use zenoh_core::{AsyncResolve, SyncResolve}; use zenoh_keyexpr::key_expr::OwnedKeyExpr; use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; use zenoh_keyexpr::keyexpr_tree::{support::NonWild, support::UnknownWildness, KeBoxTree}; @@ -144,12 +144,7 @@ impl StorageService { t.add_async(gc).await; // subscribe on key_expr - let storage_sub = match self - .session - .declare_subscriber(&self.key_expr) - .res_async() - .await - { + let storage_sub = match self.session.declare_subscriber(&self.key_expr).res().await { Ok(storage_sub) => storage_sub, Err(e) => { log::error!("Error starting storage '{}': {}", self.name, e); @@ -162,7 +157,7 @@ impl 
StorageService { .session .declare_queryable(&self.key_expr) .complete(self.complete) - .res_async() + .res() .await { Ok(storage_queryable) => storage_queryable, @@ -239,7 +234,7 @@ impl StorageService { } }; let timestamp = sample.timestamp().cloned().unwrap_or(new_reception_timestamp()); - let sample = SampleBuilder::from(sample).timestamp(timestamp).res_sync(); + let sample = SampleBuilder::from(sample).timestamp(timestamp).into(); self.process_sample(sample).await; }, // on query on key_expr @@ -303,7 +298,7 @@ impl StorageService { ); // there might be the case that the actual update was outdated due to a wild card update, but not stored yet in the storage. // get the relevant wild card entry and use that value and timestamp to update the storage - let sample_to_store = match self + let sample_to_store: Sample = match self .ovderriding_wild_update(&k, sample.timestamp().unwrap()) .await { @@ -317,17 +312,17 @@ impl StorageService { PutSampleBuilder::new(KeyExpr::from(k.clone()), payload) .encoding(encoding) .timestamp(data.timestamp) - .res_sync() + .into() } Some(Update { kind: SampleKind::Delete, data, }) => DeleteSampleBuilder::new(KeyExpr::from(k.clone())) .timestamp(data.timestamp) - .res_sync(), + .into(), None => SampleBuilder::from(sample.clone()) .keyexpr(k.clone()) - .res_sync(), + .into(), }; let stripped_key = match self.strip_prefix(sample_to_store.key_expr()) { @@ -534,7 +529,7 @@ impl StorageService { .reply(key.clone(), payload) .encoding(encoding) .timestamp(entry.timestamp) - .res_async() + .res() .await { log::warn!( @@ -569,7 +564,7 @@ impl StorageService { .reply(q.key_expr().clone(), payload) .encoding(encoding) .timestamp(entry.timestamp) - .res_async() + .res() .await { log::warn!( @@ -584,7 +579,7 @@ impl StorageService { let err_message = format!("Storage '{}' raised an error on query: {}", self.name, e); log::warn!("{}", err_message); - if let Err(e) = q.reply_err(err_message).res_async().await { + if let Err(e) = q.reply_err(err_message).res().await { log::warn!( "Storage '{}' raised an error replying a query: {}", self.name, @@ -666,7 +661,7 @@ impl StorageService { .get(KeyExpr::from(&self.key_expr).with_parameters("_time=[..]")) .target(QueryTarget::All) .consolidation(ConsolidationMode::None) - .res_async() + .res() .await { Ok(replies) => replies, diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 52a4263396..728e9cfa51 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -665,8 +665,9 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { // ensure the sample has a timestamp, thus it will always be sorted into the MergeQueue // after any timestamped Sample possibly coming from a fetch reply. 
let timestamp = s.timestamp().cloned().unwrap_or(new_reception_timestamp()); - let s = SampleBuilder::from(s).timestamp(timestamp).res_sync(); - state.merge_queue.push(s); + state + .merge_queue + .push(SampleBuilder::from(s).timestamp(timestamp).into()); } } }; diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 37f914d0e0..a52c96c871 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -294,8 +294,7 @@ impl Resolvable for ReplySampleBuilder<'_> { impl SyncResolve for ReplySampleBuilder<'_> { fn res_sync(self) -> ::To { - let sample = self.sample_builder.res_sync(); - self.query._reply_sample(sample) + self.query._reply_sample(self.sample_builder.into()) } } @@ -453,8 +452,7 @@ impl<'a> Resolvable for ReplyBuilder<'a> { impl SyncResolve for ReplyBuilder<'_> { fn res_sync(self) -> ::To { - let sample = self.sample_builder.res_sync(); - self.query._reply_sample(sample) + self.query._reply_sample(self.sample_builder.into()) } } @@ -464,8 +462,7 @@ impl<'a> Resolvable for ReplyDelBuilder<'a> { impl SyncResolve for ReplyDelBuilder<'_> { fn res_sync(self) -> ::To { - let sample = self.sample_builder.res_sync(); - self.query._reply_sample(sample) + self.query._reply_sample(self.sample_builder.into()) } } diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 813bc1c63e..870b25768e 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -22,7 +22,6 @@ use crate::Priority; #[zenoh_macros::unstable] use serde::Serialize; use std::{convert::TryFrom, fmt}; -use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::network::declare::ext::QoSType; use zenoh_protocol::{core::CongestionControl, zenoh}; @@ -595,23 +594,6 @@ impl From for QoS { } } -impl Resolvable for QoSBuilder { - type To = QoS; -} - -impl SyncResolve for QoSBuilder { - fn res_sync(self) -> ::To { - self.0 - } -} - -impl AsyncResolve for QoSBuilder { - type Future = futures::future::Ready; - fn res_async(self) -> Self::Future { - futures::future::ready(self.0) - } -} - impl QoSBuilderTrait for QoSBuilder { fn congestion_control(self, congestion_control: CongestionControl) -> Self { let mut inner = self.0.inner; diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample_builder.rs index 0996f17cf9..f74afdf2b3 100644 --- a/zenoh/src/sample_builder.rs +++ b/zenoh/src/sample_builder.rs @@ -24,9 +24,6 @@ use crate::Sample; use crate::SampleKind; use uhlc::Timestamp; use zenoh_core::zresult; -use zenoh_core::AsyncResolve; -use zenoh_core::Resolvable; -use zenoh_core::SyncResolve; use zenoh_protocol::core::CongestionControl; pub trait QoSBuilderTrait { @@ -124,17 +121,17 @@ impl SampleBuilderTrait for SampleBuilder { impl QoSBuilderTrait for SampleBuilder { fn congestion_control(self, congestion_control: CongestionControl) -> Self { let qos: QoSBuilder = self.0.qos.into(); - let qos = qos.congestion_control(congestion_control).res_sync(); + let qos = qos.congestion_control(congestion_control).into(); Self(Sample { qos, ..self.0 }) } fn priority(self, priority: Priority) -> Self { let qos: QoSBuilder = self.0.qos.into(); - let qos = qos.priority(priority).res_sync(); + let qos = qos.priority(priority).into(); Self(Sample { qos, ..self.0 }) } fn express(self, is_express: bool) -> Self { let qos: QoSBuilder = self.0.qos.into(); - let qos = qos.express(is_express).res_sync(); + let qos = qos.express(is_express).into(); Self(Sample { qos, ..self.0 }) } } @@ -325,53 +322,20 @@ impl TryFrom for DeleteSampleBuilder { } } -impl Resolvable for 
SampleBuilder { - type To = Sample; -} - -impl Resolvable for PutSampleBuilder { - type To = Sample; -} - -impl Resolvable for DeleteSampleBuilder { - type To = Sample; -} - -impl SyncResolve for SampleBuilder { - fn res_sync(self) -> Self::To { - self.0 - } -} - -impl SyncResolve for PutSampleBuilder { - fn res_sync(self) -> Self::To { - self.0.res_sync() - } -} - -impl SyncResolve for DeleteSampleBuilder { - fn res_sync(self) -> Self::To { - self.0.res_sync() - } -} - -impl AsyncResolve for SampleBuilder { - type Future = futures::future::Ready; - fn res_async(self) -> Self::Future { - futures::future::ready(self.0) +impl From for Sample { + fn from(sample_builder: SampleBuilder) -> Self { + sample_builder.0 } } -impl AsyncResolve for PutSampleBuilder { - type Future = futures::future::Ready; - fn res_async(self) -> Self::Future { - self.0.res_async() +impl From for Sample { + fn from(put_sample_builder: PutSampleBuilder) -> Self { + put_sample_builder.0 .0 } } -impl AsyncResolve for DeleteSampleBuilder { - type Future = futures::future::Ready; - fn res_async(self) -> Self::Future { - self.0.res_async() +impl From for Sample { + fn from(delete_sample_builder: DeleteSampleBuilder) -> Self { + delete_sample_builder.0 .0 } } From 6c6050b477a9f69040bd0f67748e15b7eeca242a Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 28 Mar 2024 15:15:26 +0100 Subject: [PATCH 069/357] Fix cargo clippy --- zenoh/tests/connection_retry.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/zenoh/tests/connection_retry.rs b/zenoh/tests/connection_retry.rs index db84d7bd5d..4f789e6db1 100644 --- a/zenoh/tests/connection_retry.rs +++ b/zenoh/tests/connection_retry.rs @@ -36,9 +36,7 @@ fn retry_config_overriding() { .insert_json5("listen/exit_on_failure", "false") .unwrap(); - let expected = vec![ - // global value - ConnectionRetryConf { + let expected = [ConnectionRetryConf { period_init_ms: 3000, period_max_ms: 6000, period_increase_factor: 1.5, @@ -57,8 +55,7 @@ fn retry_config_overriding() { period_max_ms: 60000, period_increase_factor: 15., exit_on_failure: true, - }, - ]; + }]; for (i, endpoint) in config.listen().endpoints().iter().enumerate() { let retry_config = zenoh_config::get_retry_config(&config, Some(endpoint), true); From 1a2ba1a75358d3703265dccbb3707680988a2647 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 28 Mar 2024 15:21:43 +0100 Subject: [PATCH 070/357] Fix code format --- zenoh/tests/connection_retry.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/zenoh/tests/connection_retry.rs b/zenoh/tests/connection_retry.rs index 4f789e6db1..fcb071b489 100644 --- a/zenoh/tests/connection_retry.rs +++ b/zenoh/tests/connection_retry.rs @@ -36,7 +36,8 @@ fn retry_config_overriding() { .insert_json5("listen/exit_on_failure", "false") .unwrap(); - let expected = [ConnectionRetryConf { + let expected = [ + ConnectionRetryConf { period_init_ms: 3000, period_max_ms: 6000, period_increase_factor: 1.5, @@ -55,7 +56,8 @@ fn retry_config_overriding() { period_max_ms: 60000, period_increase_factor: 15., exit_on_failure: true, - }]; + }, + ]; for (i, endpoint) in config.listen().endpoints().iter().enumerate() { let retry_config = zenoh_config::get_retry_config(&config, Some(endpoint), true); From 7162ff13f34a27ff7455b447536522adc23bf7a5 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 28 Mar 2024 15:43:35 +0100 Subject: [PATCH 071/357] Fix cargo clippy --- zenoh/src/sample.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff 
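
Since the sample builders no longer implement the resolvable traits, a finished builder is turned into a `Sample` with `From`/`Into` instead of being resolved. A sketch, assuming an existing `Sample` `s` and a `Timestamp` `ts`:

    let sample: Sample = SampleBuilder::from(s).timestamp(ts).into(); // formerly `.res_sync()`
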
--git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 5f0234f723..2af8fb7106 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -69,8 +69,12 @@ pub struct SourceInfo { #[test] #[cfg(feature = "unstable")] -#[cfg(not(all(target_os = "macos", target_arch = "aarch64")))] fn source_info_stack_size() { + use crate::{ + sample::{SourceInfo, SourceSn}, + ZenohId, + }; + assert_eq!(std::mem::size_of::(), 16); assert_eq!(std::mem::size_of::>(), 17); assert_eq!(std::mem::size_of::>(), 16); From 10baf8c9cf6050dc6c7f682a3d444710fdb93aea Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 16:04:11 +0100 Subject: [PATCH 072/357] doctests fixed --- zenoh/src/publication.rs | 6 +++--- zenoh/src/session.rs | 5 +++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 8f52d5e4fa..f8f15eca56 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -73,12 +73,12 @@ pub struct DeleteBuilder<'a, 'b> { /// # async_std::task::block_on(async { /// use zenoh::prelude::r#async::*; /// use zenoh::publication::CongestionControl; -/// use zenoh::sample_builder::{PutSampleBuilderTrait, QoSBuilderTrait}; +/// use zenoh::sample_builder::{ValueBuilderTrait, QoSBuilderTrait}; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// session /// .put("key/expression", "payload") -/// .with_encoding(Encoding::TEXT_PLAIN) +/// .encoding(Encoding::TEXT_PLAIN) /// .congestion_control(CongestionControl::Block) /// .res() /// .await @@ -932,7 +932,7 @@ impl<'a> Sink for Publisher<'a> { /// # async_std::task::block_on(async { /// use zenoh::prelude::r#async::*; /// use zenoh::publication::CongestionControl; -/// use zenoh::sample_builder::{PutSampleBuilderTrait, QoSBuilderTrait}; +/// use zenoh::sample_builder::QoSBuilderTrait; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let publisher = session diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 5b80adb0e5..cc30e12293 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -683,12 +683,13 @@ impl Session { /// ``` /// # async_std::task::block_on(async { /// use zenoh::prelude::r#async::*; - /// use zenoh::sample_builder::PutSampleBuilderTrait; + /// use zenoh::sample_builder::SampleBuilderTrait; + /// use zenoh::sample_builder::ValueBuilderTrait; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// session /// .put("key/expression", "payload") - /// .with_encoding(Encoding::TEXT_PLAIN) + /// .encoding(Encoding::TEXT_PLAIN) /// .res() /// .await /// .unwrap(); From 48cb96ba7ab43c13a212fe4bb5943edb38089b9b Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 16:41:55 +0100 Subject: [PATCH 073/357] sample bulider in separarte module --- zenoh/src/{sample_builder.rs => sample/builder.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename zenoh/src/{sample_builder.rs => sample/builder.rs} (100%) diff --git a/zenoh/src/sample_builder.rs b/zenoh/src/sample/builder.rs similarity index 100% rename from zenoh/src/sample_builder.rs rename to zenoh/src/sample/builder.rs From ddb93a2364bbe4db227d54b1107539b717fa0d83 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 16:42:05 +0100 Subject: [PATCH 074/357] separate module --- examples/examples/z_ping.rs | 2 +- examples/examples/z_pong.rs | 2 +- examples/examples/z_pub.rs | 2 +- examples/examples/z_pub_shm_thr.rs | 2 +- examples/examples/z_pub_thr.rs | 2 +- plugins/zenoh-plugin-rest/examples/z_serve_sse.rs 
| 2 +- plugins/zenoh-plugin-rest/src/lib.rs | 2 +- .../src/replica/align_queryable.rs | 4 ++-- .../src/replica/aligner.rs | 2 +- .../src/replica/storage.rs | 4 ++-- zenoh-ext/src/group.rs | 2 +- zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh/src/lib.rs | 1 - zenoh/src/net/runtime/adminspace.rs | 2 +- zenoh/src/prelude.rs | 4 ++++ zenoh/src/publication.rs | 10 +++++----- zenoh/src/query.rs | 2 +- zenoh/src/queryable.rs | 4 ++-- zenoh/src/sample.rs | 4 +++- zenoh/src/session.rs | 3 +-- zenoh/src/value.rs | 2 +- zenoh/tests/attachments.rs | 4 ++-- zenoh/tests/qos.rs | 2 +- zenoh/tests/routing.rs | 2 +- zenoh/tests/session.rs | 2 +- zenoh/tests/unicity.rs | 2 +- 26 files changed, 38 insertions(+), 34 deletions(-) diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index b40afc1f53..59bcaddadc 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -16,7 +16,7 @@ use std::time::{Duration, Instant}; use zenoh::config::Config; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; -use zenoh::sample_builder::QoSBuilderTrait; +use zenoh::sample::builder::QoSBuilderTrait; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index 0003958b5d..e0fa079629 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -15,7 +15,7 @@ use clap::Parser; use zenoh::config::Config; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; -use zenoh::sample_builder::QoSBuilderTrait; +use zenoh::sample::builder::QoSBuilderTrait; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 7166981e72..c4c592b47c 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -16,7 +16,7 @@ use clap::Parser; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; -use zenoh::sample_builder::SampleBuilderTrait; +use zenoh::sample::builder::SampleBuilderTrait; use zenoh_examples::CommonArgs; #[async_std::main] diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index 5230ea3ce6..a784429906 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -15,7 +15,7 @@ use clap::Parser; use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; -use zenoh::sample_builder::QoSBuilderTrait; +use zenoh::sample::builder::QoSBuilderTrait; use zenoh::shm::SharedMemoryManager; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 7e7c1ac9b5..78d54111a8 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -16,7 +16,7 @@ use clap::Parser; use std::convert::TryInto; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; -use zenoh::sample_builder::QoSBuilderTrait; +use zenoh::sample::builder::QoSBuilderTrait; use zenoh_examples::CommonArgs; fn main() { diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index 48f152e488..c353826fab 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -15,7 +15,7 @@ use clap::{arg, Command}; use std::time::Duration; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; -use zenoh::sample_builder::QoSBuilderTrait; +use zenoh::sample::builder::QoSBuilderTrait; use zenoh::{config::Config, 
key_expr::keyexpr}; const HTML: &str = r#" diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index c90bbe5ac1..94796c518d 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -34,7 +34,7 @@ use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; -use zenoh::sample_builder::ValueBuilderTrait; +use zenoh::sample::builder::ValueBuilderTrait; use zenoh::selector::TIME_RANGE_KEY; use zenoh::Session; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index b2d2bdc399..729572601c 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -20,8 +20,8 @@ use std::str; use std::str::FromStr; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; -use zenoh::sample_builder::TimestampBuilderTrait; -use zenoh::sample_builder::ValueBuilderTrait; +use zenoh::sample::builder::TimestampBuilderTrait; +use zenoh::sample::builder::ValueBuilderTrait; use zenoh::time::Timestamp; use zenoh::Session; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 3a6cc0444d..1b7f945cee 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -21,7 +21,7 @@ use std::str; use zenoh::key_expr::{KeyExpr, OwnedKeyExpr}; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; -use zenoh::sample_builder::{PutSampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}; +use zenoh::sample::builder::{PutSampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}; use zenoh::time::Timestamp; use zenoh::Session; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 9e9f8914d0..62468ac6a1 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -26,10 +26,10 @@ use zenoh::buffers::ZBuf; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh::query::{ConsolidationMode, QueryTarget}; -use zenoh::sample::{Sample, SampleKind}; -use zenoh::sample_builder::{ +use zenoh::sample::builder::{ DeleteSampleBuilder, PutSampleBuilder, SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait, }; +use zenoh::sample::{Sample, SampleKind}; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::value::Value; use zenoh::{Result as ZResult, Session, SessionDeclarations}; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 39cd982c41..4ae3c77c9f 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -29,7 +29,7 @@ use zenoh::payload::PayloadReader; use zenoh::prelude::r#async::*; use zenoh::publication::Publisher; use zenoh::query::ConsolidationMode; -use zenoh::sample_builder::QoSBuilderTrait; +use zenoh::sample::builder::QoSBuilderTrait; use zenoh::Error as ZError; use zenoh::Result as ZResult; use zenoh::Session; diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 728e9cfa51..5e80cb704c 100644 --- 
a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -20,7 +20,7 @@ use std::time::Duration; use zenoh::handlers::{locked, DefaultHandler}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; -use zenoh::sample_builder::{SampleBuilder, TimestampBuilderTrait}; +use zenoh::sample::builder::{SampleBuilder, TimestampBuilderTrait}; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::{new_reception_timestamp, Timestamp}; use zenoh::Result as ZResult; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 24b21496ec..ed2f01f180 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -145,7 +145,6 @@ pub mod publication; pub mod query; pub mod queryable; pub mod sample; -pub mod sample_builder; pub mod subscriber; pub mod value; #[cfg(feature = "shared-memory")] diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 070b3bcd3a..41295f6cd0 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -20,7 +20,7 @@ use crate::plugins::sealed::{self as plugins}; use crate::prelude::sync::SyncResolve; use crate::queryable::Query; use crate::queryable::QueryInner; -use crate::sample_builder::ValueBuilderTrait; +use crate::sample::builder::ValueBuilderTrait; use crate::value::Value; use async_std::task; use log::{error, trace}; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 26c93e1801..850148f506 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -60,6 +60,10 @@ pub(crate) mod common { #[zenoh_macros::unstable] pub use crate::publication::PublisherDeclarations; pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; + + pub use crate::sample::builder::{ + QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, + }; } /// Prelude to import when using Zenoh's sync API. diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index f8f15eca56..d2463610fb 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -15,12 +15,12 @@ //! Publishing primitives. 
use crate::net::primitives::Primitives; use crate::prelude::*; +use crate::sample::builder::{ + QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, +}; #[zenoh_macros::unstable] use crate::sample::Attachment; use crate::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; -use crate::sample_builder::{ - QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, -}; use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] @@ -73,7 +73,7 @@ pub struct DeleteBuilder<'a, 'b> { /// # async_std::task::block_on(async { /// use zenoh::prelude::r#async::*; /// use zenoh::publication::CongestionControl; -/// use zenoh::sample_builder::{ValueBuilderTrait, QoSBuilderTrait}; +/// use zenoh::sample::builder::{ValueBuilderTrait, QoSBuilderTrait}; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// session @@ -932,7 +932,7 @@ impl<'a> Sink for Publisher<'a> { /// # async_std::task::block_on(async { /// use zenoh::prelude::r#async::*; /// use zenoh::publication::CongestionControl; -/// use zenoh::sample_builder::QoSBuilderTrait; +/// use zenoh::sample::builder::QoSBuilderTrait; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let publisher = session diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 837ed69f22..3a7ee771b3 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -15,10 +15,10 @@ //! Query primitives. use crate::handlers::{locked, Callback, DefaultHandler}; use crate::prelude::*; +use crate::sample::builder::{QoSBuilderTrait, SampleBuilderTrait, ValueBuilderTrait}; #[zenoh_macros::unstable] use crate::sample::Attachment; use crate::sample::QoSBuilder; -use crate::sample_builder::{QoSBuilderTrait, SampleBuilderTrait, ValueBuilderTrait}; use crate::Session; use std::collections::HashMap; use std::future::Ready; diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index a52c96c871..2e3a1f585a 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -18,11 +18,11 @@ use crate::encoding::Encoding; use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; -use crate::sample::SourceInfo; -use crate::sample_builder::{ +use crate::sample::builder::{ DeleteSampleBuilder, PutSampleBuilder, QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, }; +use crate::sample::SourceInfo; use crate::Id; use crate::SessionRef; use crate::Undeclarable; diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index 870b25768e..455d54318b 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -16,7 +16,7 @@ use crate::encoding::Encoding; use crate::payload::Payload; use crate::prelude::{KeyExpr, Value}; -use crate::sample_builder::{QoSBuilderTrait, ValueBuilderTrait}; +use crate::sample::builder::{QoSBuilderTrait, ValueBuilderTrait}; use crate::time::Timestamp; use crate::Priority; #[zenoh_macros::unstable] @@ -26,6 +26,8 @@ use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::network::declare::ext::QoSType; use zenoh_protocol::{core::CongestionControl, zenoh}; +pub mod builder; + pub type SourceSn = u64; /// The locality of samples to be received by subscribers or targeted by publishers. 
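For reference, after this move the builder traits live under `zenoh::sample::builder` (and are re-exported by the prelude), so downstream code only needs to adjust its `use` path. A minimal stand-alone sketch, assuming the crate state at this point in the series and mirroring the doctest updated above:

// Hypothetical example; only the import path differs from the pre-move API.
use zenoh::prelude::r#async::*;
use zenoh::publication::CongestionControl;
use zenoh::sample::builder::{QoSBuilderTrait, ValueBuilderTrait};

#[async_std::main]
async fn main() {
    // Open a session and publish one value, driving the put builder through the
    // relocated traits: `encoding` comes from ValueBuilderTrait, `congestion_control`
    // from QoSBuilderTrait.
    let session = zenoh::open(config::peer()).res().await.unwrap();
    session
        .put("key/expression", "payload")
        .encoding(Encoding::TEXT_PLAIN)
        .congestion_control(CongestionControl::Block)
        .res()
        .await
        .unwrap();
}

The remaining hunks of this patch update the examples, plugins, and tests to the same `zenoh::sample::builder` path.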
diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index cc30e12293..2f24673b5e 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -683,8 +683,7 @@ impl Session { /// ``` /// # async_std::task::block_on(async { /// use zenoh::prelude::r#async::*; - /// use zenoh::sample_builder::SampleBuilderTrait; - /// use zenoh::sample_builder::ValueBuilderTrait; + /// use zenoh::prelude::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// session diff --git a/zenoh/src/value.rs b/zenoh/src/value.rs index 6d4de1366c..8ea5aef19f 100644 --- a/zenoh/src/value.rs +++ b/zenoh/src/value.rs @@ -13,7 +13,7 @@ // //! Value primitives. -use crate::{encoding::Encoding, payload::Payload, sample_builder::ValueBuilderTrait}; +use crate::{encoding::Encoding, payload::Payload, sample::builder::ValueBuilderTrait}; /// A zenoh [`Value`] contains a `payload` and an [`Encoding`] that indicates how the [`Payload`] should be interpreted. #[non_exhaustive] diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index e87fc5243b..f50e33cf6f 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -14,7 +14,7 @@ #[cfg(feature = "unstable")] #[test] fn pubsub() { - use zenoh::{prelude::sync::*, sample_builder::SampleBuilderTrait}; + use zenoh::{prelude::sync::*, sample::builder::SampleBuilderTrait}; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh @@ -61,7 +61,7 @@ fn pubsub() { #[cfg(feature = "unstable")] #[test] fn queries() { - use zenoh::{prelude::sync::*, sample::Attachment, sample_builder::SampleBuilderTrait}; + use zenoh::{prelude::sync::*, sample::builder::SampleBuilderTrait, sample::Attachment}; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 8dc39423cb..46896e5432 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -15,7 +15,7 @@ use async_std::prelude::FutureExt; use async_std::task; use std::time::Duration; use zenoh::prelude::r#async::*; -use zenoh::sample_builder::QoSBuilderTrait; +use zenoh::sample::builder::QoSBuilderTrait; use zenoh::{publication::Priority, SessionDeclarations}; use zenoh_core::zasync_executor_init; diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 123550852e..6585f8aae4 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -20,7 +20,7 @@ use std::sync::{atomic::AtomicUsize, Arc}; use std::time::Duration; use zenoh::config::{Config, ModeDependentValue}; use zenoh::prelude::r#async::*; -use zenoh::sample_builder::QoSBuilderTrait; +use zenoh::sample::builder::QoSBuilderTrait; use zenoh::{value::Value, Result}; use zenoh_core::zasync_executor_init; use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher}; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 955ec7a73f..436643ac25 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -17,7 +17,7 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; use zenoh::prelude::r#async::*; -use zenoh::sample_builder::QoSBuilderTrait; +use zenoh::sample::builder::QoSBuilderTrait; use zenoh_core::zasync_executor_init; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index 3d1327398d..80f722205b 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -17,7 +17,7 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; use zenoh::prelude::r#async::*; -use 
zenoh::sample_builder::QoSBuilderTrait; +use zenoh::sample::builder::QoSBuilderTrait; use zenoh_core::zasync_executor_init; const TIMEOUT: Duration = Duration::from_secs(60); From ab96aab5345e7556c0c6ae1329c46efe45a31b63 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 16:53:43 +0100 Subject: [PATCH 075/357] SampleBuilder put/delete --- .../src/replica/aligner.rs | 4 +-- .../src/replica/storage.rs | 8 +++--- zenoh/src/queryable.rs | 4 +-- zenoh/src/sample/builder.rs | 27 +++++++++---------- 4 files changed, 20 insertions(+), 23 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 1b7f945cee..5121f0b445 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -21,7 +21,7 @@ use std::str; use zenoh::key_expr::{KeyExpr, OwnedKeyExpr}; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; -use zenoh::sample::builder::{PutSampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}; +use zenoh::sample::builder::{SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}; use zenoh::time::Timestamp; use zenoh::Session; @@ -109,7 +109,7 @@ impl Aligner { let Value { payload, encoding, .. } = value; - let sample = PutSampleBuilder::new(key, payload) + let sample = SampleBuilder::put(key, payload) .encoding(encoding) .timestamp(ts) .into(); diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 62468ac6a1..feebfb588a 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -26,9 +26,7 @@ use zenoh::buffers::ZBuf; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh::query::{ConsolidationMode, QueryTarget}; -use zenoh::sample::builder::{ - DeleteSampleBuilder, PutSampleBuilder, SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait, -}; +use zenoh::sample::builder::{SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}; use zenoh::sample::{Sample, SampleKind}; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::value::Value; @@ -309,7 +307,7 @@ impl StorageService { let Value { payload, encoding, .. 
} = data.value; - PutSampleBuilder::new(KeyExpr::from(k.clone()), payload) + SampleBuilder::put(KeyExpr::from(k.clone()), payload) .encoding(encoding) .timestamp(data.timestamp) .into() @@ -317,7 +315,7 @@ impl StorageService { Some(Update { kind: SampleKind::Delete, data, - }) => DeleteSampleBuilder::new(KeyExpr::from(k.clone())) + }) => SampleBuilder::delete(KeyExpr::from(k.clone())) .timestamp(data.timestamp) .into(), None => SampleBuilder::from(sample.clone()) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 2e3a1f585a..c9492394c4 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -137,8 +137,8 @@ impl Query { IntoKeyExpr: Into>, IntoPayload: Into, { - let sample_builder = PutSampleBuilder::new(key_expr, payload) - .with_qos(response::ext::QoSType::RESPONSE.into()); + let sample_builder = + SampleBuilder::put(key_expr, payload).with_qos(response::ext::QoSType::RESPONSE.into()); ReplyBuilder { query: self, sample_builder, diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index f74afdf2b3..8c507c8119 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -62,22 +62,21 @@ pub trait ValueBuilderTrait { pub struct SampleBuilder(Sample); impl SampleBuilder { - pub fn new(key_expr: IntoKeyExpr) -> Self + pub fn put( + key_expr: IntoKeyExpr, + payload: IntoPayload, + ) -> PutSampleBuilder where IntoKeyExpr: Into>, + IntoPayload: Into, { - Self(Sample { - key_expr: key_expr.into(), - payload: Payload::empty(), - kind: SampleKind::default(), - encoding: Encoding::default(), - timestamp: None, - qos: QoS::default(), - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment: None, - }) + PutSampleBuilder::new(key_expr, payload) + } + pub fn delete(key_expr: IntoKeyExpr) -> DeleteSampleBuilder + where + IntoKeyExpr: Into>, + { + DeleteSampleBuilder::new(key_expr) } /// Allows to change keyexpr of [`Sample`] pub fn keyexpr(self, key_expr: IntoKeyExpr) -> Self @@ -149,7 +148,7 @@ impl From for PutSampleBuilder { } impl PutSampleBuilder { - pub fn new(key_expr: IntoKeyExpr, payload: IntoPayload) -> Self + fn new(key_expr: IntoKeyExpr, payload: IntoPayload) -> Self where IntoKeyExpr: Into>, IntoPayload: Into, From 82c1c999d0f73cc2cc09121e56067591971f5146 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 17:28:56 +0100 Subject: [PATCH 076/357] set value api --- examples/examples/z_get.rs | 21 +++++++----- plugins/zenoh-plugin-rest/src/lib.rs | 2 +- zenoh/src/publication.rs | 17 ++++++++++ zenoh/src/query.rs | 49 ++++++++++++---------------- zenoh/src/queryable.rs | 7 ++++ zenoh/src/sample/builder.rs | 12 +++++++ zenoh/src/value.rs | 18 ++++++++++ zenoh/tests/attachments.rs | 6 ++-- zenoh/tests/handler.rs | 4 +-- 9 files changed, 93 insertions(+), 43 deletions(-) diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index dce74d367b..074f931eff 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -28,15 +28,18 @@ async fn main() { let session = zenoh::open(config).res().await.unwrap(); println!("Sending Query '{selector}'..."); - let replies = match value { - Some(value) => session.get(&selector).with_value(value), - None => session.get(&selector), - } - .target(target) - .timeout(timeout) - .res() - .await - .unwrap(); + // let replies = match value { + // Some(value) => session.get(&selector).payload(value), + // None => session.get(&selector), + // } + let replies = session + .get(&selector) + 
.value(value.map(Value::from)) + .target(target) + .timeout(timeout) + .res() + .await + .unwrap(); while let Ok(reply) = replies.recv_async().await { match reply.sample { Ok(sample) => { diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 94796c518d..f78c541eff 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -420,7 +420,7 @@ async fn query(mut req: Request<(Arc, String)>) -> tide::Result { diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index d2463610fb..103a65e782 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -215,6 +215,14 @@ impl ValueBuilderTrait for PutBuilder<'_, '_> { ..self } } + fn value>(self, value: T) -> Self { + let Value { payload, encoding } = value.into(); + Self { + payload, + encoding, + ..self + } + } } impl PutBuilder<'_, '_> { @@ -798,6 +806,15 @@ impl ValueBuilderTrait for PutPublication<'_> { ..self } } + + fn value>(self, value: T) -> Self { + let Value { payload, encoding } = value.into(); + Self { + payload, + encoding, + ..self + } + } } impl TimestampBuilderTrait for DeletePublication<'_> { diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 3a7ee771b3..5a1d443463 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -178,6 +178,13 @@ impl ValueBuilderTrait for GetBuilder<'_, '_, Handler> { let value = Some(self.value.unwrap_or_default().payload(payload)); Self { value, ..self } } + fn value>(self, value: T) -> Self { + let value: Value = value.into(); + Self { + value: if value.is_empty() { None } else { Some(value) }, + ..self + } + } } impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { @@ -328,48 +335,34 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { impl<'a, 'b, Handler> GetBuilder<'a, 'b, Handler> { /// Change the target of the query. #[inline] - pub fn target(mut self, target: QueryTarget) -> Self { - self.target = target; - self + pub fn target(self, target: QueryTarget) -> Self { + Self { target, ..self } } /// Change the consolidation mode of the query. #[inline] - pub fn consolidation>(mut self, consolidation: QC) -> Self { - self.consolidation = consolidation.into(); - self + pub fn consolidation>(self, consolidation: QC) -> Self { + Self { + consolidation: consolidation.into(), + ..self + } } /// Restrict the matching queryables that will receive the query /// to the ones that have the given [`Locality`](crate::prelude::Locality). #[zenoh_macros::unstable] #[inline] - pub fn allowed_destination(mut self, destination: Locality) -> Self { - self.destination = destination; - self + pub fn allowed_destination(self, destination: Locality) -> Self { + Self { + destination, + ..self + } } /// Set query timeout. #[inline] - pub fn timeout(mut self, timeout: Duration) -> Self { - self.timeout = timeout; - self - } - - /// Set query value. 
- #[inline] - pub fn with_value(mut self, value: IntoValue) -> Self - where - IntoValue: Into, - { - self.value = Some(value.into()); - self - } - - #[zenoh_macros::unstable] - pub fn with_attachment(mut self, attachment: Attachment) -> Self { - self.attachment = Some(attachment); - self + pub fn timeout(self, timeout: Duration) -> Self { + Self { timeout, ..self } } /// By default, `get` guarantees that it will only receive replies whose key expressions intersect diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index c9492394c4..aa5f041a2b 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -378,6 +378,13 @@ impl ValueBuilderTrait for ReplyBuilder<'_> { ..self } } + fn value>(self, value: T) -> Self { + let Value { payload, encoding } = value.into(); + Self { + sample_builder: self.sample_builder.payload(payload).encoding(encoding), + ..self + } + } } /// A builder returned by [`Query::reply_del()`](Query::reply) diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index 8c507c8119..1bd50e7f69 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -22,6 +22,7 @@ use crate::Payload; use crate::Priority; use crate::Sample; use crate::SampleKind; +use crate::Value; use uhlc::Timestamp; use zenoh_core::zresult; use zenoh_protocol::core::CongestionControl; @@ -56,6 +57,9 @@ pub trait ValueBuilderTrait { fn encoding>(self, encoding: T) -> Self; /// Sets the payload fn payload>(self, payload: T) -> Self; + /// Sets both payload and encoding at once. + /// This is convenient for passing user type which supports `Into` when both payload and encoding depends on user type + fn value>(self, value: T) -> Self; } #[derive(Debug)] @@ -221,6 +225,14 @@ impl ValueBuilderTrait for PutSampleBuilder { ..self.0 .0 })) } + fn value>(self, value: T) -> Self { + let Value { payload, encoding } = value.into(); + Self(SampleBuilder(Sample { + payload, + encoding, + ..self.0 .0 + })) + } } #[derive(Debug)] diff --git a/zenoh/src/value.rs b/zenoh/src/value.rs index 8ea5aef19f..92a87cb6c5 100644 --- a/zenoh/src/value.rs +++ b/zenoh/src/value.rs @@ -43,6 +43,11 @@ impl Value { encoding: Encoding::default(), } } + /// Checks if the [`Value`] is empty. + /// Value is considered empty if its payload is empty and encoding is default. 
+ pub fn is_empty(&self) -> bool { + self.payload.is_empty() && self.encoding == Encoding::default() + } } impl ValueBuilderTrait for Value { @@ -58,6 +63,10 @@ impl ValueBuilderTrait for Value { ..self } } + fn value>(self, value: T) -> Self { + let Value { payload, encoding } = value.into(); + Self { payload, encoding } + } } impl From for Value @@ -72,6 +81,15 @@ where } } +impl From> for Value +where + T: Into, +{ + fn from(t: Option) -> Self { + t.map_or_else(Value::empty, Into::into) + } +} + impl Default for Value { fn default() -> Self { Value::empty() diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index f50e33cf6f..2725351ab0 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -100,13 +100,13 @@ fn queries() { } let get = zenoh .get("test/attachment") - .with_value("query") - .with_attachment( + .payload("query") + .attachment(Some( backer .iter() .map(|b| (b.0.as_slice(), b.1.as_slice())) .collect(), - ) + )) .res() .unwrap(); while let Ok(reply) = get.recv() { diff --git a/zenoh/tests/handler.rs b/zenoh/tests/handler.rs index c1e912fc75..ceed15e2c3 100644 --- a/zenoh/tests/handler.rs +++ b/zenoh/tests/handler.rs @@ -57,12 +57,12 @@ fn query_with_ringbuffer() { let _reply1 = zenoh .get("test/ringbuffer_query") - .with_value("query1") + .payload("query1") .res() .unwrap(); let _reply2 = zenoh .get("test/ringbuffer_query") - .with_value("query2") + .payload("query2") .res() .unwrap(); From b5a1f6b1eb3fd3310f233d54abc9135449d4630a Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 17:32:48 +0100 Subject: [PATCH 077/357] with removed --- zenoh/src/queryable.rs | 2 +- zenoh/src/sample/builder.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index aa5f041a2b..aec45c46df 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -138,7 +138,7 @@ impl Query { IntoPayload: Into, { let sample_builder = - SampleBuilder::put(key_expr, payload).with_qos(response::ext::QoSType::RESPONSE.into()); + SampleBuilder::put(key_expr, payload).qos(response::ext::QoSType::RESPONSE.into()); ReplyBuilder { query: self, sample_builder, diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index 1bd50e7f69..920bd2b7b7 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -171,14 +171,14 @@ impl PutSampleBuilder { })) } /// Allows to change keyexpr of [`Sample`] - pub fn with_keyexpr(self, key_expr: IntoKeyExpr) -> Self + pub fn keyexpr(self, key_expr: IntoKeyExpr) -> Self where IntoKeyExpr: Into>, { Self(self.0.keyexpr(key_expr)) } // It's convenient to set QoS as a whole for internal usage. For user API there are `congestion_control`, `priority` and `express` methods. 
- pub(crate) fn with_qos(self, qos: QoS) -> Self { + pub(crate) fn qos(self, qos: QoS) -> Self { Self(SampleBuilder(Sample { qos, ..self.0 .0 })) } } From 1c9515704f25020468a22bf0dfe52d8cc0fb17cb Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 17:37:48 +0100 Subject: [PATCH 078/357] commented code removed --- examples/examples/z_get.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 074f931eff..259137ee4a 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -28,10 +28,6 @@ async fn main() { let session = zenoh::open(config).res().await.unwrap(); println!("Sending Query '{selector}'..."); - // let replies = match value { - // Some(value) => session.get(&selector).payload(value), - // None => session.get(&selector), - // } let replies = session .get(&selector) .value(value.map(Value::from)) From d9eb96a8d86c232513f6c93b1d8a3d2f57ef9f1a Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 17:45:07 +0100 Subject: [PATCH 079/357] map-from removed --- examples/examples/z_get.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 259137ee4a..542f94ba63 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -30,7 +30,7 @@ async fn main() { println!("Sending Query '{selector}'..."); let replies = session .get(&selector) - .value(value.map(Value::from)) + .value(value) .target(target) .timeout(timeout) .res() From e4501f403f11837a9d143dc9f3f91801498b33fa Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 28 Mar 2024 18:01:14 +0100 Subject: [PATCH 080/357] build warnings fixed --- zenoh/tests/routing.rs | 16 +--------------- zenoh/tests/session.rs | 1 - zenoh/tests/unicity.rs | 1 - 3 files changed, 1 insertion(+), 17 deletions(-) diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 056680ffd4..830f22a475 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -11,29 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::prelude::FutureExt; -use futures::future::try_join_all; -use futures::FutureExt as _; use std::str::FromStr; use std::sync::atomic::Ordering; use std::sync::{atomic::AtomicUsize, Arc}; use std::time::Duration; -use std::{ - str::FromStr, - sync::{atomic::AtomicUsize, atomic::Ordering, Arc}, - time::Duration, -}; use tokio_util::{sync::CancellationToken, task::TaskTracker}; use zenoh::config::{Config, ModeDependentValue}; use zenoh::prelude::r#async::*; use zenoh::sample::builder::QoSBuilderTrait; -use zenoh::{ - config::{Config, ModeDependentValue}, - prelude::r#async::*, - Result, -}; -use zenoh::{value::Value, Result}; -use zenoh_core::zasync_executor_init; +use zenoh::Result; use zenoh_core::ztimeout; use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher}; use zenoh_result::bail; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 640f23da52..cd7335c28e 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -16,7 +16,6 @@ use std::sync::Arc; use std::time::Duration; use zenoh::prelude::r#async::*; use zenoh::sample::builder::QoSBuilderTrait; -use zenoh_core::zasync_executor_init; use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index f92a26d6c0..a71a0a8034 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -17,7 +17,6 @@ use std::time::Duration; use tokio::runtime::Handle; use 
zenoh::prelude::r#async::*; use zenoh::sample::builder::QoSBuilderTrait; -use zenoh_core::zasync_executor_init; use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); From 1562a17b7a8a515e4a4ef98be7b23e9da47fbd48 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 28 Mar 2024 19:14:00 +0100 Subject: [PATCH 081/357] Protocol interest (#870) * Add InterestId in Declare message * Improve comments * Update commons/zenoh-protocol/src/network/declare.rs Co-authored-by: Mahmoud Mazouz * Update commons/zenoh-protocol/src/network/declare.rs Co-authored-by: Mahmoud Mazouz --------- Co-authored-by: Mahmoud Mazouz --- commons/zenoh-codec/src/network/declare.rs | 17 ++++++++++++++++- commons/zenoh-protocol/src/network/declare.rs | 17 +++++++++++------ zenoh/src/key_expr.rs | 1 + zenoh/src/net/routing/dispatcher/resource.rs | 1 + zenoh/src/net/routing/hat/client/pubsub.rs | 4 ++++ zenoh/src/net/routing/hat/client/queries.rs | 3 +++ .../net/routing/hat/linkstate_peer/pubsub.rs | 6 ++++++ .../net/routing/hat/linkstate_peer/queries.rs | 6 ++++++ zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 4 ++++ zenoh/src/net/routing/hat/p2p_peer/queries.rs | 3 +++ zenoh/src/net/routing/hat/router/pubsub.rs | 10 ++++++++++ zenoh/src/net/routing/hat/router/queries.rs | 10 ++++++++++ zenoh/src/net/runtime/adminspace.rs | 3 +++ zenoh/src/net/tests/tables.rs | 5 +++++ zenoh/src/session.rs | 7 +++++++ 15 files changed, 90 insertions(+), 7 deletions(-) diff --git a/commons/zenoh-codec/src/network/declare.rs b/commons/zenoh-codec/src/network/declare.rs index c81514ab3e..d7a25ea0a9 100644 --- a/commons/zenoh-codec/src/network/declare.rs +++ b/commons/zenoh-codec/src/network/declare.rs @@ -95,6 +95,7 @@ where fn write(self, writer: &mut W, x: &Declare) -> Self::Output { let Declare { + interest_id, ext_qos, ext_tstamp, ext_nodeid, @@ -103,6 +104,9 @@ where // Header let mut header = id::DECLARE; + if x.interest_id.is_some() { + header |= declare::flag::I; + } let mut n_exts = ((ext_qos != &declare::ext::QoSType::DEFAULT) as u8) + (ext_tstamp.is_some() as u8) + ((ext_nodeid != &declare::ext::NodeIdType::DEFAULT) as u8); @@ -111,6 +115,11 @@ where } self.write(&mut *writer, header)?; + // Body + if let Some(interest_id) = interest_id { + self.write(&mut *writer, interest_id)?; + } + // Extensions if ext_qos != &declare::ext::QoSType::DEFAULT { n_exts -= 1; @@ -157,6 +166,11 @@ where return Err(DidntRead); } + let mut interest_id = None; + if imsg::has_flag(self.header, declare::flag::I) { + interest_id = Some(self.codec.read(&mut *reader)?); + } + // Extensions let mut ext_qos = declare::ext::QoSType::DEFAULT; let mut ext_tstamp = None; @@ -192,10 +206,11 @@ where let body: DeclareBody = self.codec.read(&mut *reader)?; Ok(Declare { - body, + interest_id, ext_qos, ext_tstamp, ext_nodeid, + body, }) } } diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index d41d8bf67f..10027259c2 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -25,20 +25,22 @@ pub use subscriber::*; pub use token::*; pub mod flag { - // pub const X: u8 = 1 << 5; // 0x20 Reserved - // pub const X: u8 = 1 << 6; // 0x40 Reserved + pub const I: u8 = 1 << 5; // 0x20 Interest if I==1 then the declare is in a response to an Interest with future==false + // pub const X: u8 = 1 << 6; // 0x40 Reserved pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow } /// Flags: -/// - X: Reserved +/// - I: 
Interest If I==1 then the declare is in a response to an Interest with future==false /// - X: Reserved /// - Z: Extension If Z==1 then at least one extension is present /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ -/// |Z|X|X| DECLARE | +/// |Z|X|I| DECLARE | /// +-+-+-+---------+ +/// ~interest_id:z32~ if I==1 +/// +---------------+ /// ~ [decl_exts] ~ if Z==1 /// +---------------+ /// ~ declaration ~ @@ -46,6 +48,7 @@ pub mod flag { /// #[derive(Debug, Clone, PartialEq, Eq)] pub struct Declare { + pub interest_id: Option, pub ext_qos: ext::QoSType, pub ext_tstamp: Option, pub ext_nodeid: ext::NodeIdType, @@ -132,16 +135,18 @@ impl Declare { let mut rng = rand::thread_rng(); - let body = DeclareBody::rand(); + let interest_id = rng.gen_bool(0.5).then_some(rng.gen::()); let ext_qos = ext::QoSType::rand(); let ext_tstamp = rng.gen_bool(0.5).then(ext::TimestampType::rand); let ext_nodeid = ext::NodeIdType::rand(); + let body = DeclareBody::rand(); Self { - body, + interest_id, ext_qos, ext_tstamp, ext_nodeid, + body, } } } diff --git a/zenoh/src/key_expr.rs b/zenoh/src/key_expr.rs index f340f24cf1..aaa1d13724 100644 --- a/zenoh/src/key_expr.rs +++ b/zenoh/src/key_expr.rs @@ -664,6 +664,7 @@ impl SyncResolve for KeyExprUndeclaration<'_> { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(zenoh_protocol::network::Declare { + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 0450dab38a..194b97fca8 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -452,6 +452,7 @@ impl Resource { .insert(expr_id, nonwild_prefix.clone()); face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 290f90f95f..e85bb77bf9 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -53,6 +53,7 @@ fn propagate_simple_subscription_to( let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -136,6 +137,7 @@ fn declare_client_subscription( .primitives .send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -169,6 +171,7 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -203,6 +206,7 @@ pub(super) fn undeclare_client_subscription( if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 2ac3f1b993..5c0bc5349b 100644 --- 
a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -93,6 +93,7 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -164,6 +165,7 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -414,6 +418,7 @@ pub(super) fn undeclare_client_subscription( if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -455,6 +460,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index 9fba744a9c..150c12a632 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -126,6 +126,7 @@ fn send_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { @@ -169,6 +170,7 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -337,6 +339,7 @@ fn send_forget_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { @@ -362,6 +365,7 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index a722176292..b495248788 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -53,6 +53,7 @@ fn propagate_simple_subscription_to( let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -136,6 +137,7 @@ fn declare_client_subscription( .primitives .send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -169,6 +171,7 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc if let Some(id) = 
face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -203,6 +206,7 @@ pub(super) fn undeclare_client_subscription( if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index 38f77bec45..72c32b9217 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -93,6 +93,7 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -164,6 +165,7 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -408,6 +412,7 @@ fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc< if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -559,6 +564,7 @@ pub(super) fn undeclare_client_subscription( if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -600,6 +606,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -628,6 +635,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -766,6 +774,7 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: if forget { dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -791,6 +800,7 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: }; dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 61abaa7c55..99e787beb5 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -194,6 +194,7 @@ fn send_sourced_queryable_to_net_childs( 
someface.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { @@ -247,6 +248,7 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -471,6 +473,7 @@ fn send_forget_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { @@ -496,6 +499,7 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -768,6 +775,7 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -866,6 +874,7 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links if forget { dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -891,6 +900,7 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 166ff16bd0..d460ee3f1c 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -276,6 +276,8 @@ impl AdminSpace { zlock!(admin.primitives).replace(primitives.clone()); primitives.send_declare(Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -287,6 +289,7 @@ impl AdminSpace { }); primitives.send_declare(Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index 516bcd0109..4067f2ad8f 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -579,6 +579,7 @@ fn client_test() { Primitives::send_declare( primitives0.as_ref(), Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -606,6 +607,7 @@ fn client_test() { Primitives::send_declare( primitives0.as_ref(), Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -627,6 +629,7 @@ fn client_test() { Primitives::send_declare( primitives1.as_ref(), Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -654,6 +657,7 @@ fn client_test() { Primitives::send_declare( primitives1.as_ref(), Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, 
ext_nodeid: ext::NodeIdType::DEFAULT, @@ -675,6 +679,7 @@ fn client_test() { Primitives::send_declare( primitives2.as_ref(), Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index b9e20a4e68..addb757807 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -872,6 +872,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1084,6 +1085,7 @@ impl Session { // }; primitives.send_declare(Declare { + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1140,6 +1142,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1191,6 +1194,7 @@ impl Session { distance: 0, }; primitives.send_declare(Declare { + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1212,6 +1216,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1247,6 +1252,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1271,6 +1277,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, From 21fb0832d9cfa904bf787ef9d511572b5ce81755 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 29 Mar 2024 10:43:07 +0100 Subject: [PATCH 082/357] Protocol batchsize (#873) * Use BatchSize typedef instead of u16 * Use BatchSize typedef instead of u16 for vsock --- commons/zenoh-codec/src/core/zint.rs | 68 ++++++++++--------- commons/zenoh-protocol/src/transport/init.rs | 4 +- commons/zenoh-protocol/src/transport/join.rs | 2 +- commons/zenoh-protocol/src/transport/mod.rs | 1 + io/zenoh-link-commons/src/lib.rs | 3 +- io/zenoh-link-commons/src/multicast.rs | 4 +- io/zenoh-link-commons/src/unicast.rs | 7 +- io/zenoh-links/zenoh-link-quic/src/lib.rs | 13 ++-- io/zenoh-links/zenoh-link-quic/src/unicast.rs | 3 +- io/zenoh-links/zenoh-link-serial/src/lib.rs | 7 +- .../zenoh-link-serial/src/unicast.rs | 3 +- io/zenoh-links/zenoh-link-tcp/src/lib.rs | 5 +- io/zenoh-links/zenoh-link-tcp/src/unicast.rs | 3 +- io/zenoh-links/zenoh-link-tls/src/lib.rs | 13 ++-- io/zenoh-links/zenoh-link-tls/src/unicast.rs | 4 +- io/zenoh-links/zenoh-link-udp/src/lib.rs | 11 +-- .../zenoh-link-udp/src/multicast.rs | 3 +- io/zenoh-links/zenoh-link-udp/src/unicast.rs | 3 +- .../zenoh-link-unixpipe/src/unix/unicast.rs | 5 +- .../zenoh-link-unixsock_stream/src/lib.rs | 5 +- .../zenoh-link-unixsock_stream/src/unicast.rs | 3 +- io/zenoh-links/zenoh-link-vsock/src/lib.rs | 4 +- .../zenoh-link-vsock/src/unicast.rs 
| 8 ++- io/zenoh-links/zenoh-link-ws/src/lib.rs | 5 +- io/zenoh-links/zenoh-link-ws/src/unicast.rs | 3 +- io/zenoh-transport/src/common/pipeline.rs | 12 ++-- io/zenoh-transport/src/manager.rs | 6 +- .../src/unicast/establishment/cookie.rs | 9 ++- 28 files changed, 125 insertions(+), 92 deletions(-) diff --git a/commons/zenoh-codec/src/core/zint.rs b/commons/zenoh-codec/src/core/zint.rs index 0daff7348b..d5160e2ee6 100644 --- a/commons/zenoh-codec/src/core/zint.rs +++ b/commons/zenoh-codec/src/core/zint.rs @@ -17,38 +17,42 @@ use zenoh_buffers::{ writer::{DidntWrite, Writer}, }; -const VLE_LEN: usize = 9; +const VLE_LEN_MAX: usize = vle_len(u64::MAX); + +const fn vle_len(x: u64) -> usize { + const B1: u64 = u64::MAX << 7; + const B2: u64 = u64::MAX << (7 * 2); + const B3: u64 = u64::MAX << (7 * 3); + const B4: u64 = u64::MAX << (7 * 4); + const B5: u64 = u64::MAX << (7 * 5); + const B6: u64 = u64::MAX << (7 * 6); + const B7: u64 = u64::MAX << (7 * 7); + const B8: u64 = u64::MAX << (7 * 8); + + if (x & B1) == 0 { + 1 + } else if (x & B2) == 0 { + 2 + } else if (x & B3) == 0 { + 3 + } else if (x & B4) == 0 { + 4 + } else if (x & B5) == 0 { + 5 + } else if (x & B6) == 0 { + 6 + } else if (x & B7) == 0 { + 7 + } else if (x & B8) == 0 { + 8 + } else { + 9 + } +} impl LCodec for Zenoh080 { fn w_len(self, x: u64) -> usize { - const B1: u64 = u64::MAX << 7; - const B2: u64 = u64::MAX << (7 * 2); - const B3: u64 = u64::MAX << (7 * 3); - const B4: u64 = u64::MAX << (7 * 4); - const B5: u64 = u64::MAX << (7 * 5); - const B6: u64 = u64::MAX << (7 * 6); - const B7: u64 = u64::MAX << (7 * 7); - const B8: u64 = u64::MAX << (7 * 8); - - if (x & B1) == 0 { - 1 - } else if (x & B2) == 0 { - 2 - } else if (x & B3) == 0 { - 3 - } else if (x & B4) == 0 { - 4 - } else if (x & B5) == 0 { - 5 - } else if (x & B6) == 0 { - 6 - } else if (x & B7) == 0 { - 7 - } else if (x & B8) == 0 { - 8 - } else { - 9 - } + vle_len(x) } } @@ -107,7 +111,7 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, mut x: u64) -> Self::Output { - writer.with_slot(VLE_LEN, move |buffer| { + writer.with_slot(VLE_LEN_MAX, move |buffer| { let mut len = 0; while (x & !0x7f_u64) != 0 { // SAFETY: buffer is guaranteed to be VLE_LEN long where VLE_LEN is @@ -122,7 +126,7 @@ where } // In case len == VLE_LEN then all the bits have already been written in the latest iteration. // Else we haven't written all the necessary bytes yet. - if len != VLE_LEN { + if len != VLE_LEN_MAX { // SAFETY: buffer is guaranteed to be VLE_LEN long where VLE_LEN is // the maximum number of bytes a VLE can take once encoded. 
// I.e.: x is shifted 7 bits to the right every iteration, @@ -151,7 +155,7 @@ where let mut v = 0; let mut i = 0; // 7 * VLE_LEN is beyond the maximum number of shift bits - while (b & 0x80_u8) != 0 && i != 7 * (VLE_LEN - 1) { + while (b & 0x80_u8) != 0 && i != 7 * (VLE_LEN_MAX - 1) { v |= ((b & 0x7f_u8) as u64) << i; b = reader.read_u8()?; i += 7; diff --git a/commons/zenoh-protocol/src/transport/init.rs b/commons/zenoh-protocol/src/transport/init.rs index 1327288471..de517a353c 100644 --- a/commons/zenoh-protocol/src/transport/init.rs +++ b/commons/zenoh-protocol/src/transport/init.rs @@ -165,7 +165,7 @@ impl InitSyn { let whatami = WhatAmI::rand(); let zid = ZenohId::default(); let resolution = Resolution::rand(); - let batch_size: u16 = rng.gen(); + let batch_size: BatchSize = rng.gen(); let ext_qos = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); let ext_shm = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_auth = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); @@ -221,7 +221,7 @@ impl InitAck { } else { Resolution::rand() }; - let batch_size: u16 = rng.gen(); + let batch_size: BatchSize = rng.gen(); let cookie = ZSlice::rand(64); let ext_qos = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); let ext_shm = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); diff --git a/commons/zenoh-protocol/src/transport/join.rs b/commons/zenoh-protocol/src/transport/join.rs index c5fbb98430..a5cf1422a6 100644 --- a/commons/zenoh-protocol/src/transport/join.rs +++ b/commons/zenoh-protocol/src/transport/join.rs @@ -141,7 +141,7 @@ impl Join { let whatami = WhatAmI::rand(); let zid = ZenohId::default(); let resolution = Resolution::rand(); - let batch_size: u16 = rng.gen(); + let batch_size: BatchSize = rng.gen(); let lease = if rng.gen_bool(0.5) { Duration::from_secs(rng.gen()) } else { diff --git a/commons/zenoh-protocol/src/transport/mod.rs b/commons/zenoh-protocol/src/transport/mod.rs index 1ea6fca144..e92860f441 100644 --- a/commons/zenoh-protocol/src/transport/mod.rs +++ b/commons/zenoh-protocol/src/transport/mod.rs @@ -39,6 +39,7 @@ use crate::network::NetworkMessage; /// the boundary of the serialized messages. The length is encoded as little-endian. /// In any case, the length of a message must not exceed 65_535 bytes. 
pub type BatchSize = u16; +pub type AtomicBatchSize = core::sync::atomic::AtomicU16; pub mod batch_size { use super::BatchSize; diff --git a/io/zenoh-link-commons/src/lib.rs b/io/zenoh-link-commons/src/lib.rs index f9ad7166ee..138726fd4f 100644 --- a/io/zenoh-link-commons/src/lib.rs +++ b/io/zenoh-link-commons/src/lib.rs @@ -32,6 +32,7 @@ pub use multicast::*; use serde::Serialize; pub use unicast::*; use zenoh_protocol::core::Locator; +use zenoh_protocol::transport::BatchSize; use zenoh_result::ZResult; /*************************************/ @@ -45,7 +46,7 @@ pub struct Link { pub src: Locator, pub dst: Locator, pub group: Option, - pub mtu: u16, + pub mtu: BatchSize, pub is_reliable: bool, pub is_streamed: bool, pub interfaces: Vec, diff --git a/io/zenoh-link-commons/src/multicast.rs b/io/zenoh-link-commons/src/multicast.rs index 65bc7195b6..ccfe6842c1 100644 --- a/io/zenoh-link-commons/src/multicast.rs +++ b/io/zenoh-link-commons/src/multicast.rs @@ -22,7 +22,7 @@ use zenoh_buffers::{reader::HasReader, writer::HasWriter}; use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_protocol::{ core::{EndPoint, Locator}, - transport::TransportMessage, + transport::{BatchSize, TransportMessage}, }; use zenoh_result::{zerror, ZResult}; @@ -44,7 +44,7 @@ pub struct LinkMulticast(pub Arc); #[async_trait] pub trait LinkMulticastTrait: Send + Sync { - fn get_mtu(&self) -> u16; + fn get_mtu(&self) -> BatchSize; fn get_src(&self) -> &Locator; fn get_dst(&self) -> &Locator; fn is_reliable(&self) -> bool; diff --git a/io/zenoh-link-commons/src/unicast.rs b/io/zenoh-link-commons/src/unicast.rs index fe87e70e94..c21f4a008c 100644 --- a/io/zenoh-link-commons/src/unicast.rs +++ b/io/zenoh-link-commons/src/unicast.rs @@ -19,7 +19,10 @@ use core::{ ops::Deref, }; use std::net::SocketAddr; -use zenoh_protocol::core::{EndPoint, Locator}; +use zenoh_protocol::{ + core::{EndPoint, Locator}, + transport::BatchSize, +}; use zenoh_result::ZResult; pub type LinkManagerUnicast = Arc; @@ -41,7 +44,7 @@ pub struct LinkUnicast(pub Arc); #[async_trait] pub trait LinkUnicastTrait: Send + Sync { - fn get_mtu(&self) -> u16; + fn get_mtu(&self) -> BatchSize; fn get_src(&self) -> &Locator; fn get_dst(&self) -> &Locator; fn is_reliable(&self) -> bool; diff --git a/io/zenoh-links/zenoh-link-quic/src/lib.rs b/io/zenoh-links/zenoh-link-quic/src/lib.rs index c6d7e16087..4bcabaf5b6 100644 --- a/io/zenoh-links/zenoh-link-quic/src/lib.rs +++ b/io/zenoh-links/zenoh-link-quic/src/lib.rs @@ -28,9 +28,12 @@ use std::net::SocketAddr; use zenoh_config::Config; use zenoh_core::zconfigurable; use zenoh_link_commons::{ConfigurationInspector, LocatorInspector}; -use zenoh_protocol::core::{ - endpoint::{Address, Parameters}, - Locator, +use zenoh_protocol::{ + core::{ + endpoint::{Address, Parameters}, + Locator, + }, + transport::BatchSize, }; use zenoh_result::{bail, zerror, ZResult}; @@ -47,7 +50,7 @@ pub const ALPN_QUIC_HTTP: &[&[u8]] = &[b"hq-29"]; // adopted in Zenoh and the usage of 16 bits in Zenoh to encode the // payload length in byte-streamed, the QUIC MTU is constrained to // 2^16 - 1 bytes (i.e., 65535). -const QUIC_MAX_MTU: u16 = u16::MAX; +const QUIC_MAX_MTU: BatchSize = BatchSize::MAX; pub const QUIC_LOCATOR_PREFIX: &str = "quic"; #[derive(Default, Clone, Copy, Debug)] @@ -137,7 +140,7 @@ impl ConfigurationInspector for QuicConfigurator { zconfigurable! { // Default MTU (QUIC PDU) in bytes. 
- static ref QUIC_DEFAULT_MTU: u16 = QUIC_MAX_MTU; + static ref QUIC_DEFAULT_MTU: BatchSize = QUIC_MAX_MTU; // The LINGER option causes the shutdown() call to block until (1) all application data is delivered // to the remote end or (2) a timeout expires. The timeout is expressed in seconds. // More info on the LINGER option and its dynamics can be found at: diff --git a/io/zenoh-links/zenoh-link-quic/src/unicast.rs b/io/zenoh-links/zenoh-link-quic/src/unicast.rs index 33953d666d..14a01861ca 100644 --- a/io/zenoh-links/zenoh-link-quic/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-quic/src/unicast.rs @@ -34,6 +34,7 @@ use zenoh_link_commons::{ ListenersUnicastIP, NewLinkChannelSender, }; use zenoh_protocol::core::{EndPoint, Locator}; +use zenoh_protocol::transport::BatchSize; use zenoh_result::{bail, zerror, ZError, ZResult}; pub struct LinkUnicastQuic { @@ -135,7 +136,7 @@ impl LinkUnicastTrait for LinkUnicastQuic { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { *QUIC_DEFAULT_MTU } diff --git a/io/zenoh-links/zenoh-link-serial/src/lib.rs b/io/zenoh-links/zenoh-link-serial/src/lib.rs index fb4d7fcc12..f7b0b7afeb 100644 --- a/io/zenoh-links/zenoh-link-serial/src/lib.rs +++ b/io/zenoh-links/zenoh-link-serial/src/lib.rs @@ -25,10 +25,11 @@ pub use unicast::*; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; use zenoh_protocol::core::{endpoint::Address, EndPoint, Locator}; +use zenoh_protocol::transport::BatchSize; use zenoh_result::ZResult; // Maximum MTU (Serial PDU) in bytes. -const SERIAL_MAX_MTU: u16 = z_serial::MAX_MTU as u16; +const SERIAL_MAX_MTU: BatchSize = z_serial::MAX_MTU as BatchSize; const DEFAULT_BAUDRATE: u32 = 9_600; @@ -36,11 +37,11 @@ const DEFAULT_EXCLUSIVE: bool = true; pub const SERIAL_LOCATOR_PREFIX: &str = "serial"; -const SERIAL_MTU_LIMIT: u16 = SERIAL_MAX_MTU; +const SERIAL_MTU_LIMIT: BatchSize = SERIAL_MAX_MTU; zconfigurable! { // Default MTU (UDP PDU) in bytes. - static ref SERIAL_DEFAULT_MTU: u16 = SERIAL_MTU_LIMIT; + static ref SERIAL_DEFAULT_MTU: BatchSize = SERIAL_MTU_LIMIT; // Amount of time in microseconds to throttle the accept loop upon an error. // Default set to 100 ms. 
static ref SERIAL_ACCEPT_THROTTLE_TIME: u64 = 100_000; diff --git a/io/zenoh-links/zenoh-link-serial/src/unicast.rs b/io/zenoh-links/zenoh-link-serial/src/unicast.rs index 0efa40ee90..0a5bea3c18 100644 --- a/io/zenoh-links/zenoh-link-serial/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-serial/src/unicast.rs @@ -30,6 +30,7 @@ use zenoh_link_commons::{ NewLinkChannelSender, }; use zenoh_protocol::core::{EndPoint, Locator}; +use zenoh_protocol::transport::BatchSize; use zenoh_result::{zerror, ZResult}; use z_serial::ZSerial; @@ -177,7 +178,7 @@ impl LinkUnicastTrait for LinkUnicastSerial { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { *SERIAL_DEFAULT_MTU } diff --git a/io/zenoh-links/zenoh-link-tcp/src/lib.rs b/io/zenoh-links/zenoh-link-tcp/src/lib.rs index 1a7d6ae705..0b075d9bf8 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/lib.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/lib.rs @@ -22,6 +22,7 @@ use std::net::SocketAddr; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; use zenoh_protocol::core::{endpoint::Address, Locator}; +use zenoh_protocol::transport::BatchSize; use zenoh_result::{zerror, ZResult}; mod unicast; @@ -33,7 +34,7 @@ pub use unicast::*; // adopted in Zenoh and the usage of 16 bits in Zenoh to encode the // payload length in byte-streamed, the TCP MTU is constrained to // 2^16 - 1 bytes (i.e., 65535). -const TCP_MAX_MTU: u16 = u16::MAX; +const TCP_MAX_MTU: BatchSize = BatchSize::MAX; pub const TCP_LOCATOR_PREFIX: &str = "tcp"; @@ -52,7 +53,7 @@ impl LocatorInspector for TcpLocatorInspector { zconfigurable! { // Default MTU (TCP PDU) in bytes. - static ref TCP_DEFAULT_MTU: u16 = TCP_MAX_MTU; + static ref TCP_DEFAULT_MTU: BatchSize = TCP_MAX_MTU; // The LINGER option causes the shutdown() call to block until (1) all application data is delivered // to the remote end or (2) a timeout expires. The timeout is expressed in seconds. // More info on the LINGER option and its dynamics can be found at: diff --git a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs index 7137ac0212..aaadcf3c23 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs @@ -25,6 +25,7 @@ use zenoh_link_commons::{ ListenersUnicastIP, NewLinkChannelSender, BIND_INTERFACE, }; use zenoh_protocol::core::{EndPoint, Locator}; +use zenoh_protocol::transport::BatchSize; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; use super::{ @@ -145,7 +146,7 @@ impl LinkUnicastTrait for LinkUnicastTcp { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { *TCP_DEFAULT_MTU } diff --git a/io/zenoh-links/zenoh-link-tls/src/lib.rs b/io/zenoh-links/zenoh-link-tls/src/lib.rs index 95d59104b4..7faebb4cd9 100644 --- a/io/zenoh-links/zenoh-link-tls/src/lib.rs +++ b/io/zenoh-links/zenoh-link-tls/src/lib.rs @@ -30,9 +30,12 @@ use std::{convert::TryFrom, net::SocketAddr}; use zenoh_config::Config; use zenoh_core::zconfigurable; use zenoh_link_commons::{ConfigurationInspector, LocatorInspector}; -use zenoh_protocol::core::{ - endpoint::{self, Address}, - Locator, +use zenoh_protocol::{ + core::{ + endpoint::{self, Address}, + Locator, + }, + transport::BatchSize, }; use zenoh_result::{bail, zerror, ZResult}; @@ -45,7 +48,7 @@ pub use unicast::*; // adopted in Zenoh and the usage of 16 bits in Zenoh to encode the // payload length in byte-streamed, the TLS MTU is constrained to // 2^16 - 1 bytes (i.e., 65535). 
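The same 16-bit length-prefix argument recurs for every stream-oriented link touched by this patch (TCP, TLS, WS, UNIX sockets), which is why each `*_MAX_MTU` constant collapses to `BatchSize::MAX`. A hedged sketch of the resulting cap; the `clamp_link_mtu` helper is hypothetical and not part of zenoh-link-commons, only the `BatchSize = u16` alias comes from the patch.

type BatchSize = u16; // as defined in zenoh_protocol::transport

// On streamed links each batch is prefixed by a 16-bit little-endian length,
// so no link may advertise an MTU larger than 65_535 bytes, whatever the
// underlying transport could carry.
fn clamp_link_mtu(transport_mtu: usize) -> BatchSize {
    transport_mtu.min(BatchSize::MAX as usize) as BatchSize
}
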
-const TLS_MAX_MTU: u16 = u16::MAX; +const TLS_MAX_MTU: BatchSize = BatchSize::MAX; pub const TLS_LOCATOR_PREFIX: &str = "tls"; #[derive(Default, Clone, Copy)] @@ -172,7 +175,7 @@ impl ConfigurationInspector for TlsConfigurator { zconfigurable! { // Default MTU (TLS PDU) in bytes. - static ref TLS_DEFAULT_MTU: u16 = TLS_MAX_MTU; + static ref TLS_DEFAULT_MTU: BatchSize = TLS_MAX_MTU; // The LINGER option causes the shutdown() call to block until (1) all application data is delivered // to the remote end or (2) a timeout expires. The timeout is expressed in seconds. // More info on the LINGER option and its dynamics can be found at: diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index 7da711161e..a58e7372dd 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -42,8 +42,8 @@ use zenoh_link_commons::{ get_ip_interface_names, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, }; -use zenoh_protocol::core::endpoint::Config; use zenoh_protocol::core::{EndPoint, Locator}; +use zenoh_protocol::{core::endpoint::Config, transport::BatchSize}; use zenoh_result::{bail, zerror, ZError, ZResult}; pub struct LinkUnicastTls { @@ -180,7 +180,7 @@ impl LinkUnicastTrait for LinkUnicastTls { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { *TLS_DEFAULT_MTU } diff --git a/io/zenoh-links/zenoh-link-udp/src/lib.rs b/io/zenoh-links/zenoh-link-udp/src/lib.rs index 91d02cc13d..86db845d8f 100644 --- a/io/zenoh-links/zenoh-link-udp/src/lib.rs +++ b/io/zenoh-links/zenoh-link-udp/src/lib.rs @@ -27,6 +27,7 @@ pub use unicast::*; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; use zenoh_protocol::core::{endpoint::Address, Locator}; +use zenoh_protocol::transport::BatchSize; use zenoh_result::{zerror, ZResult}; // NOTE: In case of using UDP in high-throughput scenarios, it is recommended to set the @@ -44,24 +45,24 @@ use zenoh_result::{zerror, ZResult}; // Although in IPv6 it is possible to have UDP datagrams of size greater than 65,535 bytes via // IPv6 Jumbograms, its usage in Zenoh is discouraged unless the consequences are very well // understood. -const UDP_MAX_MTU: u16 = 65_507; +const UDP_MAX_MTU: BatchSize = 65_507; pub const UDP_LOCATOR_PREFIX: &str = "udp"; #[cfg(any(target_os = "linux", target_os = "windows"))] // Linux default value of a maximum datagram size is set to UDP MAX MTU. -const UDP_MTU_LIMIT: u16 = UDP_MAX_MTU; +const UDP_MTU_LIMIT: BatchSize = UDP_MAX_MTU; #[cfg(target_os = "macos")] // Mac OS X default value of a maximum datagram size is set to 9216 bytes. -const UDP_MTU_LIMIT: u16 = 9_216; +const UDP_MTU_LIMIT: BatchSize = 9_216; #[cfg(not(any(target_os = "linux", target_os = "macos", target_os = "windows")))] -const UDP_MTU_LIMIT: u16 = 8_192; +const UDP_MTU_LIMIT: BatchSize = 8_192; zconfigurable! { // Default MTU (UDP PDU) in bytes. - static ref UDP_DEFAULT_MTU: u16 = UDP_MTU_LIMIT; + static ref UDP_DEFAULT_MTU: BatchSize = UDP_MTU_LIMIT; // Amount of time in microseconds to throttle the accept loop upon an error. // Default set to 100 ms. 
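The 65_507 value for `UDP_MAX_MTU` above is not arbitrary: it is the IPv4 datagram arithmetic. A small illustration; the header sizes are the standard IPv4/UDP values, not constants defined by this patch.

const IPV4_TOTAL_LENGTH_MAX: u16 = 65_535; // 16-bit IPv4 "total length" field
const IPV4_HEADER: u16 = 20; // minimum IPv4 header, without options
const UDP_HEADER: u16 = 8;

// 65_535 - 20 - 8 = 65_507 bytes of UDP payload at most.
const UDP_MAX_MTU: u16 = IPV4_TOTAL_LENGTH_MAX - IPV4_HEADER - UDP_HEADER;
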
static ref UDP_ACCEPT_THROTTLE_TIME: u64 = 100_000; diff --git a/io/zenoh-links/zenoh-link-udp/src/multicast.rs b/io/zenoh-links/zenoh-link-udp/src/multicast.rs index bc894bd296..a6e7977052 100644 --- a/io/zenoh-links/zenoh-link-udp/src/multicast.rs +++ b/io/zenoh-links/zenoh-link-udp/src/multicast.rs @@ -21,6 +21,7 @@ use std::{borrow::Cow, fmt}; use tokio::net::UdpSocket; use zenoh_link_commons::{LinkManagerMulticastTrait, LinkMulticast, LinkMulticastTrait}; use zenoh_protocol::core::{Config, EndPoint, Locator}; +use zenoh_protocol::transport::BatchSize; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; pub struct LinkMulticastUdp { @@ -119,7 +120,7 @@ impl LinkMulticastTrait for LinkMulticastUdp { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { *UDP_DEFAULT_MTU } diff --git a/io/zenoh-links/zenoh-link-udp/src/unicast.rs b/io/zenoh-links/zenoh-link-udp/src/unicast.rs index 1cd4a0b1ec..5021969bfa 100644 --- a/io/zenoh-links/zenoh-link-udp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-udp/src/unicast.rs @@ -30,6 +30,7 @@ use zenoh_link_commons::{ LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, BIND_INTERFACE, }; use zenoh_protocol::core::{EndPoint, Locator}; +use zenoh_protocol::transport::BatchSize; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; use zenoh_sync::Mvar; @@ -200,7 +201,7 @@ impl LinkUnicastTrait for LinkUnicastUdp { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { *UDP_DEFAULT_MTU } diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs index 0a0aebe730..3026d4e4b0 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs @@ -33,6 +33,7 @@ use tokio::io::Interest; use tokio_util::sync::CancellationToken; use zenoh_core::{zasyncread, zasyncwrite}; use zenoh_protocol::core::{EndPoint, Locator}; +use zenoh_protocol::transport::BatchSize; use zenoh_runtime::ZRuntime; use unix_named_pipe::{create, open_write}; @@ -45,7 +46,7 @@ use zenoh_result::{bail, ZResult}; use super::FILE_ACCESS_MASK; -const LINUX_PIPE_MAX_MTU: u16 = 65_535; +const LINUX_PIPE_MAX_MTU: BatchSize = BatchSize::MAX; const LINUX_PIPE_DEDICATE_TRIES: usize = 100; static PIPE_INVITATION: &[u8] = &[0xDE, 0xAD, 0xBE, 0xEF]; @@ -498,7 +499,7 @@ impl LinkUnicastTrait for UnicastPipe { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { LINUX_PIPE_MAX_MTU } diff --git a/io/zenoh-links/zenoh-link-unixsock_stream/src/lib.rs b/io/zenoh-links/zenoh-link-unixsock_stream/src/lib.rs index b6c180cd8d..ce067c1aa2 100644 --- a/io/zenoh-links/zenoh-link-unixsock_stream/src/lib.rs +++ b/io/zenoh-links/zenoh-link-unixsock_stream/src/lib.rs @@ -21,6 +21,7 @@ use async_trait::async_trait; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; use zenoh_protocol::core::{endpoint::Address, Locator}; +use zenoh_protocol::transport::BatchSize; use zenoh_result::ZResult; #[cfg(target_family = "unix")] mod unicast; @@ -33,13 +34,13 @@ pub use unicast::*; // adopted in Zenoh and the usage of 16 bits in Zenoh to encode the // payload length in byte-streamed, the UNIXSOCKSTREAM MTU is constrained to // 2^16 - 1 bytes (i.e., 65535). -const UNIXSOCKSTREAM_MAX_MTU: u16 = u16::MAX; +const UNIXSOCKSTREAM_MAX_MTU: BatchSize = BatchSize::MAX; pub const UNIXSOCKSTREAM_LOCATOR_PREFIX: &str = "unixsock-stream"; zconfigurable! 
{ // Default MTU (UNIXSOCKSTREAM PDU) in bytes. - static ref UNIXSOCKSTREAM_DEFAULT_MTU: u16 = UNIXSOCKSTREAM_MAX_MTU; + static ref UNIXSOCKSTREAM_DEFAULT_MTU: BatchSize = UNIXSOCKSTREAM_MAX_MTU; // Amount of time in microseconds to throttle the accept loop upon an error. // Default set to 100 ms. static ref UNIXSOCKSTREAM_ACCEPT_THROTTLE_TIME: u64 = 100_000; diff --git a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs index 53441ab89c..a961c1aebb 100644 --- a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs @@ -32,6 +32,7 @@ use zenoh_link_commons::{ LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; use zenoh_protocol::core::{EndPoint, Locator}; +use zenoh_protocol::transport::BatchSize; use zenoh_result::{zerror, ZResult}; use super::{get_unix_path_as_string, UNIXSOCKSTREAM_DEFAULT_MTU, UNIXSOCKSTREAM_LOCATOR_PREFIX}; @@ -119,7 +120,7 @@ impl LinkUnicastTrait for LinkUnicastUnixSocketStream { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { *UNIXSOCKSTREAM_DEFAULT_MTU } diff --git a/io/zenoh-links/zenoh-link-vsock/src/lib.rs b/io/zenoh-links/zenoh-link-vsock/src/lib.rs index 7834050796..d58250fed3 100644 --- a/io/zenoh-links/zenoh-link-vsock/src/lib.rs +++ b/io/zenoh-links/zenoh-link-vsock/src/lib.rs @@ -22,7 +22,7 @@ use async_trait::async_trait; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; -use zenoh_protocol::core::Locator; +use zenoh_protocol::{core::Locator, transport::BatchSize}; use zenoh_result::ZResult; #[cfg(target_os = "linux")] @@ -47,7 +47,7 @@ impl LocatorInspector for VsockLocatorInspector { zconfigurable! { // Default MTU in bytes. - static ref VSOCK_DEFAULT_MTU: u16 = u16::MAX; + static ref VSOCK_DEFAULT_MTU: BatchSize = BatchSize::MAX; // Amount of time in microseconds to throttle the accept loop upon an error. // Default set to 100 ms. 
static ref VSOCK_ACCEPT_THROTTLE_TIME: u64 = 100_000; diff --git a/io/zenoh-links/zenoh-link-vsock/src/unicast.rs b/io/zenoh-links/zenoh-link-vsock/src/unicast.rs index ced7b9dc15..59efa6f0e3 100644 --- a/io/zenoh-links/zenoh-link-vsock/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-vsock/src/unicast.rs @@ -27,8 +27,10 @@ use zenoh_core::{zasyncread, zasyncwrite}; use zenoh_link_commons::{ LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; -use zenoh_protocol::core::endpoint::Address; -use zenoh_protocol::core::{EndPoint, Locator}; +use zenoh_protocol::{ + core::{endpoint::Address, EndPoint, Locator}, + transport::BatchSize, +}; use zenoh_result::{bail, zerror, ZResult}; use super::{VSOCK_ACCEPT_THROTTLE_TIME, VSOCK_DEFAULT_MTU, VSOCK_LOCATOR_PREFIX}; @@ -170,7 +172,7 @@ impl LinkUnicastTrait for LinkUnicastVsock { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { *VSOCK_DEFAULT_MTU } diff --git a/io/zenoh-links/zenoh-link-ws/src/lib.rs b/io/zenoh-links/zenoh-link-ws/src/lib.rs index f68a20d15d..d165b480a9 100644 --- a/io/zenoh-links/zenoh-link-ws/src/lib.rs +++ b/io/zenoh-links/zenoh-link-ws/src/lib.rs @@ -23,6 +23,7 @@ use url::Url; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; use zenoh_protocol::core::{endpoint::Address, Locator}; +use zenoh_protocol::transport::BatchSize; use zenoh_result::{bail, ZResult}; mod unicast; pub use unicast::*; @@ -33,7 +34,7 @@ pub use unicast::*; // adopted in Zenoh and the usage of 16 bits in Zenoh to encode the // payload length in byte-streamed, the TCP MTU is constrained to // 2^16 - 1 bytes (i.e., 65535). -const WS_MAX_MTU: u16 = u16::MAX; +const WS_MAX_MTU: BatchSize = BatchSize::MAX; pub const WS_LOCATOR_PREFIX: &str = "ws"; @@ -51,7 +52,7 @@ impl LocatorInspector for WsLocatorInspector { zconfigurable! { // Default MTU (TCP PDU) in bytes. - static ref WS_DEFAULT_MTU: u16 = WS_MAX_MTU; + static ref WS_DEFAULT_MTU: BatchSize = WS_MAX_MTU; // Amount of time in microseconds to throttle the accept loop upon an error. // Default set to 100 ms. 
static ref TCP_ACCEPT_THROTTLE_TIME: u64 = 100_000; diff --git a/io/zenoh-links/zenoh-link-ws/src/unicast.rs b/io/zenoh-links/zenoh-link-ws/src/unicast.rs index 6a0cf64e6e..acf568f78c 100644 --- a/io/zenoh-links/zenoh-link-ws/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-ws/src/unicast.rs @@ -34,6 +34,7 @@ use zenoh_link_commons::{ LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; use zenoh_protocol::core::{EndPoint, Locator}; +use zenoh_protocol::transport::BatchSize; use zenoh_result::{bail, zerror, ZResult}; use super::{get_ws_addr, get_ws_url, TCP_ACCEPT_THROTTLE_TIME, WS_DEFAULT_MTU, WS_LOCATOR_PREFIX}; @@ -200,7 +201,7 @@ impl LinkUnicastTrait for LinkUnicastWs { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { *WS_DEFAULT_MTU } diff --git a/io/zenoh-transport/src/common/pipeline.rs b/io/zenoh-transport/src/common/pipeline.rs index fb95d709db..b74fa2990c 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ -22,7 +22,7 @@ use ringbuffer_spsc::{RingBuffer, RingBufferReader, RingBufferWriter}; use std::sync::{Arc, Mutex, MutexGuard}; use std::time::Duration; use std::{ - sync::atomic::{AtomicBool, AtomicU16, Ordering}, + sync::atomic::{AtomicBool, Ordering}, time::Instant, }; use zenoh_buffers::{ @@ -40,7 +40,7 @@ use zenoh_protocol::{ transport::{ fragment::FragmentHeader, frame::{self, FrameHeader}, - BatchSize, TransportMessage, + AtomicBatchSize, BatchSize, TransportMessage, }, }; @@ -75,7 +75,7 @@ impl StageInRefill { struct StageInOut { n_out_w: Sender<()>, s_out_w: RingBufferWriter, - bytes: Arc, + bytes: Arc, backoff: Arc, } @@ -355,12 +355,12 @@ enum Pull { struct Backoff { retry_time: NanoSeconds, last_bytes: BatchSize, - bytes: Arc, + bytes: Arc, backoff: Arc, } impl Backoff { - fn new(bytes: Arc, backoff: Arc) -> Self { + fn new(bytes: Arc, backoff: Arc) -> Self { Self { retry_time: 0, last_bytes: 0, @@ -552,7 +552,7 @@ impl TransmissionPipeline { // This is a SPSC ring buffer let (s_out_w, s_out_r) = RingBuffer::::init(); let current = Arc::new(Mutex::new(None)); - let bytes = Arc::new(AtomicU16::new(0)); + let bytes = Arc::new(AtomicBatchSize::new(0)); let backoff = Arc::new(AtomicBool::new(false)); stage_in.push(Mutex::new(StageIn { diff --git a/io/zenoh-transport/src/manager.rs b/io/zenoh-transport/src/manager.rs index f16a68cfba..2d7961ed2b 100644 --- a/io/zenoh-transport/src/manager.rs +++ b/io/zenoh-transport/src/manager.rs @@ -93,7 +93,7 @@ pub struct TransportManagerConfig { pub zid: ZenohId, pub whatami: WhatAmI, pub resolution: Resolution, - pub batch_size: u16, + pub batch_size: BatchSize, pub wait_before_drop: Duration, pub queue_size: [usize; Priority::NUM], pub queue_backoff: Duration, @@ -122,7 +122,7 @@ pub struct TransportManagerBuilder { zid: ZenohId, whatami: WhatAmI, resolution: Resolution, - batch_size: u16, + batch_size: BatchSize, wait_before_drop: Duration, queue_size: QueueSizeConf, queue_backoff: Duration, @@ -151,7 +151,7 @@ impl TransportManagerBuilder { self } - pub fn batch_size(mut self, batch_size: u16) -> Self { + pub fn batch_size(mut self, batch_size: BatchSize) -> Self { self.batch_size = batch_size; self } diff --git a/io/zenoh-transport/src/unicast/establishment/cookie.rs b/io/zenoh-transport/src/unicast/establishment/cookie.rs index 0db9e1c93a..6f0295601c 100644 --- a/io/zenoh-transport/src/unicast/establishment/cookie.rs +++ b/io/zenoh-transport/src/unicast/establishment/cookie.rs @@ -19,14 +19,17 @@ use 
zenoh_buffers::{ }; use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_crypto::{BlockCipher, PseudoRng}; -use zenoh_protocol::core::{Resolution, WhatAmI, ZenohId}; +use zenoh_protocol::{ + core::{Resolution, WhatAmI, ZenohId}, + transport::BatchSize, +}; #[derive(Debug, PartialEq)] pub(crate) struct Cookie { pub(crate) zid: ZenohId, pub(crate) whatami: WhatAmI, pub(crate) resolution: Resolution, - pub(crate) batch_size: u16, + pub(crate) batch_size: BatchSize, pub(crate) nonce: u64, // Extensions pub(crate) ext_qos: ext::qos::StateAccept, @@ -82,7 +85,7 @@ where let whatami = WhatAmI::try_from(wai).map_err(|_| DidntRead)?; let resolution: u8 = self.read(&mut *reader)?; let resolution = Resolution::from(resolution); - let batch_size: u16 = self.read(&mut *reader)?; + let batch_size: BatchSize = self.read(&mut *reader)?; let nonce: u64 = self.read(&mut *reader)?; // Extensions let ext_qos: ext::qos::StateAccept = self.read(&mut *reader)?; From 312c03a2a79e0d8a06904008331148efd2a5475a Mon Sep 17 00:00:00 2001 From: DenisBiryukov91 <155981813+DenisBiryukov91@users.noreply.github.com> Date: Fri, 29 Mar 2024 16:57:52 +0100 Subject: [PATCH 083/357] Query.reply and reply_del, now accept TryIntoKeyExpr instead of IntoKeyExpr (#878) --- zenoh/src/queryable.rs | 41 ++++++++++++++++++++++------------------- zenoh/tests/routing.rs | 2 +- zenoh/tests/session.rs | 5 +---- 3 files changed, 24 insertions(+), 24 deletions(-) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 599c0e13be..58589bfe8f 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -111,7 +111,7 @@ impl Query { #[inline(always)] #[cfg(feature = "unstable")] #[doc(hidden)] - pub fn reply_sample(&self, sample: Sample) -> ReplyBuilder<'_> { + pub fn reply_sample(&self, sample: Sample) -> ReplyBuilder<'_, 'static> { let Sample { key_expr, payload, @@ -126,7 +126,7 @@ impl Query { } = sample; ReplyBuilder { query: self, - key_expr, + key_expr: Ok(key_expr), payload, kind, encoding, @@ -145,18 +145,19 @@ impl Query { /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]), /// replying on a disjoint key expression will result in an error when resolving the reply. #[inline(always)] - pub fn reply( + pub fn reply<'b, TryIntoKeyExpr, IntoPayload>( &self, - key_expr: IntoKeyExpr, + key_expr: TryIntoKeyExpr, payload: IntoPayload, - ) -> ReplyBuilder<'_> + ) -> ReplyBuilder<'_, 'b> where - IntoKeyExpr: Into>, + TryIntoKeyExpr: TryInto>, + >>::Error: Into, IntoPayload: Into, { ReplyBuilder { query: self, - key_expr: key_expr.into(), + key_expr: key_expr.try_into().map_err(Into::into), payload: payload.into(), kind: SampleKind::Put, timestamp: None, @@ -187,13 +188,14 @@ impl Query { /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]), /// replying on a disjoint key expression will result in an error when resolving the reply. #[inline(always)] - pub fn reply_del(&self, key_expr: IntoKeyExpr) -> ReplyBuilder<'_> + pub fn reply_del<'b, TryIntoKeyExpr>(&self, key_expr: TryIntoKeyExpr) -> ReplyBuilder<'_, 'b> where - IntoKeyExpr: Into>, + TryIntoKeyExpr: TryInto>, + >>::Error: Into, { ReplyBuilder { query: self, - key_expr: key_expr.into(), + key_expr: key_expr.try_into().map_err(Into::into), payload: Payload::empty(), kind: SampleKind::Delete, timestamp: None, @@ -248,9 +250,9 @@ impl fmt::Display for Query { /// A builder returned by [`Query::reply()`](Query::reply) or [`Query::reply()`](Query::reply). 
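To make the signature change above concrete, the call sites in this commit's tests now pass plain strings (anything implementing `TryInto<KeyExpr>`), and a key-expression error surfaces when the reply is resolved rather than at the call site. The snippet below is a sketch of such a call site; the surrounding queryable setup and the `on_query` callback are assumed, not taken from this patch.

use zenoh::prelude::r#async::*;
use zenoh::queryable::Query;

// Hypothetical callback body: before this change these calls required an
// explicit KeyExpr::try_from(...) at the call site.
async fn on_query(query: Query) {
    query.reply("demo/reply", vec![1u8, 2, 3]).res_async().await.unwrap();
    query.reply_del("demo/deleted").res_async().await.unwrap();
}
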
#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] -pub struct ReplyBuilder<'a> { +pub struct ReplyBuilder<'a, 'b> { query: &'a Query, - key_expr: KeyExpr<'static>, + key_expr: ZResult>, payload: Payload, kind: SampleKind, encoding: Encoding, @@ -270,7 +272,7 @@ pub struct ReplyErrBuilder<'a> { value: Value, } -impl<'a> ReplyBuilder<'a> { +impl<'a, 'b> ReplyBuilder<'a, 'b> { #[zenoh_macros::unstable] pub fn with_attachment(mut self, attachment: Attachment) -> Self { self.attachment = Some(attachment); @@ -292,16 +294,17 @@ impl<'a> ReplyBuilder<'a> { } } -impl<'a> Resolvable for ReplyBuilder<'a> { +impl<'a, 'b> Resolvable for ReplyBuilder<'a, 'b> { type To = ZResult<()>; } -impl SyncResolve for ReplyBuilder<'_> { +impl<'a, 'b> SyncResolve for ReplyBuilder<'a, 'b> { fn res_sync(self) -> ::To { + let key_expr = self.key_expr?; if !self.query._accepts_any_replies().unwrap_or(false) - && !self.query.key_expr().intersects(&self.key_expr) + && !self.query.key_expr().intersects(&key_expr) { - bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", self.key_expr, self.query.key_expr()) + bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", &key_expr, self.query.key_expr()) } #[allow(unused_mut)] // will be unused if feature = "unstable" is not enabled let mut ext_sinfo = None; @@ -318,7 +321,7 @@ impl SyncResolve for ReplyBuilder<'_> { rid: self.query.inner.qid, wire_expr: WireExpr { scope: 0, - suffix: std::borrow::Cow::Owned(self.key_expr.into()), + suffix: std::borrow::Cow::Owned(key_expr.into()), mapping: Mapping::Sender, }, payload: ResponseBody::Reply(zenoh::Reply { @@ -360,7 +363,7 @@ impl SyncResolve for ReplyBuilder<'_> { } } -impl<'a> AsyncResolve for ReplyBuilder<'a> { +impl<'a, 'b> AsyncResolve for ReplyBuilder<'a, 'b> { type Future = Ready; fn res_async(self) -> Self::Future { diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index c34d06690a..b90f0f568f 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -137,7 +137,7 @@ impl Task { tokio::select! 
{ _ = token.cancelled() => break, query = queryable.recv_async() => { - query?.reply(KeyExpr::try_from(ke.to_owned())?, payload.clone()).res_async().await?; + query?.reply(ke.to_owned(), payload.clone()).res_async().await?; }, } } diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 5e86499bc7..8c2d2e9937 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -164,10 +164,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re "ok_del" => { tokio::task::block_in_place(|| { tokio::runtime::Handle::current().block_on(async { - ztimeout!(query - .reply_del(KeyExpr::try_from(key_expr).unwrap()) - .res_async()) - .unwrap() + ztimeout!(query.reply_del(key_expr).res_async()).unwrap() }) }); } From 43a49379c0f126032f89505789d158b908c62ad6 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 29 Mar 2024 18:18:28 +0100 Subject: [PATCH 084/357] SampleBuilder uses generics --- Cargo.lock | 50 ++--- zenoh/src/queryable.rs | 97 ++-------- zenoh/src/sample/builder.rs | 363 +++++++++++++++--------------------- 3 files changed, 189 insertions(+), 321 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d3ea8978b5..9dff82ad80 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -165,9 +165,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.12" +version = "0.6.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b09b5178381e0874812a9b157f7fe84982617e48f71f4e3235482775e5b540" +checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" dependencies = [ "anstyle", "anstyle-parse", @@ -1103,9 +1103,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c012a26a7f605efc424dd53697843a72be7dc86ad2d01f7814337794a12231d" +checksum = "38b35839ba51819680ba087cd351788c9a3c476841207e0b8cee0b04722343b9" dependencies = [ "anstream", "anstyle", @@ -1122,9 +1122,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "erased-serde" -version = "0.3.31" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c138974f9d5e7fe373eb04df7cae98833802ae4b11c24ac7039a21d5af4b26c" +checksum = "2b73807008a3c7f171cc40312f37d95ef0396e048b5848d775f54b1a4dd4a0d3" dependencies = [ "serde", ] @@ -1541,9 +1541,9 @@ dependencies = [ [[package]] name = "http" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ "bytes", "fnv", @@ -1854,9 +1854,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.20" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" dependencies = [ "serde", "value-bag", @@ -2865,9 +2865,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.22.2" +version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" +checksum = "99008d7ad0bbbea527ec27bddbc0e432c5b87d8175178cee68d2eec9c4a1813c" dependencies = [ "log", "ring 
0.17.6", @@ -2923,9 +2923,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.3.1" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ede67b28608b4c60685c7d54122d4400d90f62b40caee7700e700380a390fa8" +checksum = "ecd36cc4259e3e4514335c4a138c6b43171a8d61d8f5c9348f9fc7529416f247" [[package]] name = "rustls-webpki" @@ -3701,9 +3701,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.36.0" +version = "1.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" +checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" dependencies = [ "backtrace", "bytes", @@ -3743,7 +3743,7 @@ version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" dependencies = [ - "rustls 0.22.2", + "rustls 0.22.3", "rustls-pki-types", "tokio", ] @@ -4030,9 +4030,9 @@ dependencies = [ [[package]] name = "value-bag" -version = "1.4.1" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d92ccd67fb88503048c01b59152a04effd0782d035a83a6d256ce6085f08f4a3" +checksum = "74797339c3b98616c009c7c3eb53a0ce41e85c8ec66bd3db96ed132d20cfdee8" dependencies = [ "value-bag-serde1", "value-bag-sval2", @@ -4040,9 +4040,9 @@ dependencies = [ [[package]] name = "value-bag-serde1" -version = "1.4.1" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0b9f3feef403a50d4d67e9741a6d8fc688bcbb4e4f31bd4aab72cc690284394" +checksum = "cc35703541cbccb5278ef7b589d79439fc808ff0b5867195a3230f9a47421d39" dependencies = [ "erased-serde", "serde", @@ -4051,9 +4051,9 @@ dependencies = [ [[package]] name = "value-bag-sval2" -version = "1.4.1" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b24f4146b6f3361e91cbf527d1fb35e9376c3c0cef72ca5ec5af6d640fad7d" +checksum = "285b43c29d0b4c0e65aad24561baee67a1b69dc9be9375d4a85138cbf556f7f8" dependencies = [ "sval", "sval_buffer", @@ -4676,7 +4676,7 @@ dependencies = [ "flume", "futures", "log", - "rustls 0.22.2", + "rustls 0.22.3", "rustls-webpki 0.102.2", "serde", "tokio", @@ -4763,7 +4763,7 @@ dependencies = [ "base64 0.21.4", "futures", "log", - "rustls 0.22.2", + "rustls 0.22.3", "rustls-pemfile 2.0.0", "rustls-pki-types", "rustls-webpki 0.102.2", diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 5df0d73d44..0e977f3def 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -19,8 +19,8 @@ use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; use crate::sample::builder::{ - DeleteSampleBuilder, PutSampleBuilder, QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, - TimestampBuilderTrait, ValueBuilderTrait, + OpDelete, OpPut, QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, TimestampBuilderTrait, + ValueBuilderTrait, }; use crate::sample::SourceInfo; use crate::Id; @@ -115,10 +115,10 @@ impl Query { #[inline(always)] #[cfg(feature = "unstable")] #[doc(hidden)] - pub fn reply_sample(&self, sample: Sample) -> ReplySampleBuilder<'_> { - ReplySampleBuilder { + pub fn reply_sample(&self, sample: Sample) -> ReplySample<'_> { + ReplySample { query: self, - sample_builder: sample.into(), + sample, } } @@ -168,7 +168,7 @@ impl Query { IntoKeyExpr: Into>, { let sample_builder = - 
DeleteSampleBuilder::new(key_expr).with_qos(response::ext::QoSType::RESPONSE.into()); + SampleBuilder::delete(key_expr).qos(response::ext::QoSType::RESPONSE.into()); ReplyDelBuilder { query: self, sample_builder, @@ -214,91 +214,22 @@ impl fmt::Display for Query { } } -pub struct ReplySampleBuilder<'a> { +pub struct ReplySample<'a> { query: &'a Query, - sample_builder: SampleBuilder, + sample: Sample, } -impl<'a> ReplySampleBuilder<'a> { - pub fn put(self, payload: IntoPayload) -> ReplyBuilder<'a> - where - IntoPayload: Into, - { - let builder = ReplyBuilder { - query: self.query, - sample_builder: self.sample_builder.into(), - }; - builder.payload(payload) - } - pub fn delete(self) -> ReplyDelBuilder<'a> { - ReplyDelBuilder { - query: self.query, - sample_builder: self.sample_builder.into(), - } - } -} - -impl TimestampBuilderTrait for ReplySampleBuilder<'_> { - fn timestamp>>(self, timestamp: T) -> Self { - Self { - sample_builder: self.sample_builder.timestamp(timestamp), - ..self - } - } -} - -impl SampleBuilderTrait for ReplySampleBuilder<'_> { - #[cfg(feature = "unstable")] - fn source_info(self, source_info: SourceInfo) -> Self { - Self { - sample_builder: self.sample_builder.source_info(source_info), - ..self - } - } - - #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: T) -> Self { - Self { - sample_builder: self.sample_builder.attachment(attachment), - ..self - } - } -} - -impl QoSBuilderTrait for ReplySampleBuilder<'_> { - fn congestion_control(self, congestion_control: CongestionControl) -> Self { - Self { - sample_builder: self.sample_builder.congestion_control(congestion_control), - ..self - } - } - - fn priority(self, priority: Priority) -> Self { - Self { - sample_builder: self.sample_builder.priority(priority), - ..self - } - } - - fn express(self, is_express: bool) -> Self { - Self { - sample_builder: self.sample_builder.express(is_express), - ..self - } - } -} - -impl Resolvable for ReplySampleBuilder<'_> { +impl Resolvable for ReplySample<'_> { type To = ZResult<()>; } -impl SyncResolve for ReplySampleBuilder<'_> { +impl SyncResolve for ReplySample<'_> { fn res_sync(self) -> ::To { - self.query._reply_sample(self.sample_builder.into()) + self.query._reply_sample(self.sample) } } -impl AsyncResolve for ReplySampleBuilder<'_> { +impl AsyncResolve for ReplySample<'_> { type Future = Ready; fn res_async(self) -> Self::Future { @@ -311,7 +242,7 @@ impl AsyncResolve for ReplySampleBuilder<'_> { #[derive(Debug)] pub struct ReplyBuilder<'a> { query: &'a Query, - sample_builder: PutSampleBuilder, + sample_builder: SampleBuilder, } impl TimestampBuilderTrait for ReplyBuilder<'_> { @@ -392,7 +323,7 @@ impl ValueBuilderTrait for ReplyBuilder<'_> { #[derive(Debug)] pub struct ReplyDelBuilder<'a> { query: &'a Query, - sample_builder: DeleteSampleBuilder, + sample_builder: SampleBuilder, } impl TimestampBuilderTrait for ReplyDelBuilder<'_> { diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index 920bd2b7b7..cae58514ff 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -12,6 +12,8 @@ // ZettaScale Zenoh Team, // +use std::marker::PhantomData; + use crate::sample::Attachment; use crate::sample::QoS; use crate::sample::QoSBuilder; @@ -63,290 +65,225 @@ pub trait ValueBuilderTrait { } #[derive(Debug)] -pub struct SampleBuilder(Sample); +pub struct OpPut; +#[derive(Debug)] +pub struct OpDelete; +#[derive(Debug)] +pub struct OpAny; -impl SampleBuilder { +#[derive(Debug)] +pub struct SampleBuilder { + sample: Sample, + _t: 
PhantomData, +} + +impl SampleBuilder { pub fn put( key_expr: IntoKeyExpr, payload: IntoPayload, - ) -> PutSampleBuilder + ) -> SampleBuilder where IntoKeyExpr: Into>, IntoPayload: Into, { - PutSampleBuilder::new(key_expr, payload) + Self { + sample: Sample { + key_expr: key_expr.into(), + payload: payload.into(), + kind: SampleKind::Put, + encoding: Encoding::default(), + timestamp: None, + qos: QoS::default(), + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment: None, + }, + _t: PhantomData::, + } } - pub fn delete(key_expr: IntoKeyExpr) -> DeleteSampleBuilder +} + +impl SampleBuilder { + pub fn delete(key_expr: IntoKeyExpr) -> SampleBuilder where IntoKeyExpr: Into>, { - DeleteSampleBuilder::new(key_expr) + Self { + sample: Sample { + key_expr: key_expr.into(), + payload: Payload::empty(), + kind: SampleKind::Delete, + encoding: Encoding::default(), + timestamp: None, + qos: QoS::default(), + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment: None, + }, + _t: PhantomData::, + } } +} + +impl SampleBuilder { /// Allows to change keyexpr of [`Sample`] pub fn keyexpr(self, key_expr: IntoKeyExpr) -> Self where IntoKeyExpr: Into>, { - Self(Sample { - key_expr: key_expr.into(), - ..self.0 - }) + Self { + sample: Sample { + key_expr: key_expr.into(), + ..self.sample + }, + _t: PhantomData::, + } } } -impl TimestampBuilderTrait for SampleBuilder { - fn timestamp>>(self, timestamp: T) -> Self { - Self(Sample { - timestamp: timestamp.into(), - ..self.0 - }) +impl TimestampBuilderTrait for SampleBuilder { + fn timestamp>>(self, timestamp: U) -> Self { + Self { + sample: Sample { + timestamp: timestamp.into(), + ..self.sample + }, + _t: PhantomData::, + } } } -impl SampleBuilderTrait for SampleBuilder { +impl SampleBuilderTrait for SampleBuilder { #[zenoh_macros::unstable] fn source_info(self, source_info: SourceInfo) -> Self { - Self(Sample { - source_info, - ..self.0 - }) + Self { + sample: Sample { + source_info, + ..self.sample + }, + _t: PhantomData::, + } } #[zenoh_macros::unstable] - fn attachment>>(self, attachment: T) -> Self { - Self(Sample { - attachment: attachment.into(), - ..self.0 - }) - } -} - -impl QoSBuilderTrait for SampleBuilder { - fn congestion_control(self, congestion_control: CongestionControl) -> Self { - let qos: QoSBuilder = self.0.qos.into(); - let qos = qos.congestion_control(congestion_control).into(); - Self(Sample { qos, ..self.0 }) - } - fn priority(self, priority: Priority) -> Self { - let qos: QoSBuilder = self.0.qos.into(); - let qos = qos.priority(priority).into(); - Self(Sample { qos, ..self.0 }) - } - fn express(self, is_express: bool) -> Self { - let qos: QoSBuilder = self.0.qos.into(); - let qos = qos.express(is_express).into(); - Self(Sample { qos, ..self.0 }) - } -} - -#[derive(Debug)] -pub struct PutSampleBuilder(SampleBuilder); - -impl From for PutSampleBuilder { - fn from(sample_builder: SampleBuilder) -> Self { - Self(SampleBuilder(Sample { - kind: SampleKind::Put, - ..sample_builder.0 - })) - } -} - -impl PutSampleBuilder { - fn new(key_expr: IntoKeyExpr, payload: IntoPayload) -> Self - where - IntoKeyExpr: Into>, - IntoPayload: Into, - { - Self(SampleBuilder::from(Sample { - key_expr: key_expr.into(), - payload: payload.into(), - kind: SampleKind::Put, - encoding: Encoding::default(), - timestamp: None, - qos: QoS::default(), - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment: 
None, - })) - } - /// Allows to change keyexpr of [`Sample`] - pub fn keyexpr(self, key_expr: IntoKeyExpr) -> Self - where - IntoKeyExpr: Into>, - { - Self(self.0.keyexpr(key_expr)) - } - // It's convenient to set QoS as a whole for internal usage. For user API there are `congestion_control`, `priority` and `express` methods. - pub(crate) fn qos(self, qos: QoS) -> Self { - Self(SampleBuilder(Sample { qos, ..self.0 .0 })) - } -} - -impl TimestampBuilderTrait for PutSampleBuilder { - fn timestamp>>(self, timestamp: T) -> Self { - Self(self.0.timestamp(timestamp)) + fn attachment>>(self, attachment: U) -> Self { + Self { + sample: Sample { + attachment: attachment.into(), + ..self.sample + }, + _t: PhantomData::, + } } } -impl SampleBuilderTrait for PutSampleBuilder { - #[zenoh_macros::unstable] - fn source_info(self, source_info: SourceInfo) -> Self { - Self(self.0.source_info(source_info)) - } - #[zenoh_macros::unstable] - fn attachment>>(self, attachment: T) -> Self { - Self(self.0.attachment(attachment)) +impl SampleBuilder { + pub fn qos(self, qos: QoS) -> Self { + Self { + sample: Sample { qos, ..self.sample }, + _t: PhantomData::, + } } } -impl QoSBuilderTrait for PutSampleBuilder { +impl QoSBuilderTrait for SampleBuilder { fn congestion_control(self, congestion_control: CongestionControl) -> Self { - Self(self.0.congestion_control(congestion_control)) + let qos: QoSBuilder = self.sample.qos.into(); + let qos = qos.congestion_control(congestion_control).into(); + Self { + sample: Sample { qos, ..self.sample }, + _t: PhantomData::, + } } fn priority(self, priority: Priority) -> Self { - Self(self.0.priority(priority)) + let qos: QoSBuilder = self.sample.qos.into(); + let qos = qos.priority(priority).into(); + Self { + sample: Sample { qos, ..self.sample }, + _t: PhantomData::, + } } fn express(self, is_express: bool) -> Self { - Self(self.0.express(is_express)) + let qos: QoSBuilder = self.sample.qos.into(); + let qos = qos.express(is_express).into(); + Self { + sample: Sample { qos, ..self.sample }, + _t: PhantomData::, + } } } -impl ValueBuilderTrait for PutSampleBuilder { +impl ValueBuilderTrait for SampleBuilder { fn encoding>(self, encoding: T) -> Self { - Self(SampleBuilder(Sample { - encoding: encoding.into(), - ..self.0 .0 - })) + Self { + sample: Sample { + encoding: encoding.into(), + ..self.sample + }, + _t: PhantomData::, + } } fn payload>(self, payload: T) -> Self { - Self(SampleBuilder(Sample { - payload: payload.into(), - ..self.0 .0 - })) + Self { + sample: Sample { + payload: payload.into(), + ..self.sample + }, + _t: PhantomData::, + } } fn value>(self, value: T) -> Self { let Value { payload, encoding } = value.into(); - Self(SampleBuilder(Sample { - payload, - encoding, - ..self.0 .0 - })) - } -} - -#[derive(Debug)] -pub struct DeleteSampleBuilder(SampleBuilder); - -impl From for DeleteSampleBuilder { - fn from(sample_builder: SampleBuilder) -> Self { - Self(SampleBuilder(Sample { - kind: SampleKind::Delete, - ..sample_builder.0 - })) - } -} - -impl DeleteSampleBuilder { - pub fn new(key_expr: IntoKeyExpr) -> Self - where - IntoKeyExpr: Into>, - { - Self(SampleBuilder::from(Sample { - key_expr: key_expr.into(), - payload: Payload::empty(), - kind: SampleKind::Delete, - encoding: Encoding::default(), - timestamp: None, - qos: QoS::default(), - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment: None, - })) - } - /// Allows to change keyexpr of [`Sample`] - pub fn with_keyexpr(self, key_expr: IntoKeyExpr) -> 
Self - where - IntoKeyExpr: Into>, - { - Self(self.0.keyexpr(key_expr)) - } - // It's convenient to set QoS as a whole for internal usage. For user API there are `congestion_control`, `priority` and `express` methods. - pub(crate) fn with_qos(self, qos: QoS) -> Self { - Self(SampleBuilder(Sample { qos, ..self.0 .0 })) - } -} - -impl TimestampBuilderTrait for DeleteSampleBuilder { - fn timestamp>>(self, timestamp: T) -> Self { - Self(self.0.timestamp(timestamp)) - } -} - -impl SampleBuilderTrait for DeleteSampleBuilder { - #[zenoh_macros::unstable] - fn source_info(self, source_info: SourceInfo) -> Self { - Self(self.0.source_info(source_info)) - } - #[zenoh_macros::unstable] - fn attachment>>(self, attachment: T) -> Self { - Self(self.0.attachment(attachment)) - } -} - -impl QoSBuilderTrait for DeleteSampleBuilder { - fn congestion_control(self, congestion_control: CongestionControl) -> Self { - Self(self.0.congestion_control(congestion_control)) - } - fn priority(self, priority: Priority) -> Self { - Self(self.0.priority(priority)) - } - fn express(self, is_express: bool) -> Self { - Self(self.0.express(is_express)) + Self { + sample: Sample { + payload, + encoding, + ..self.sample + }, + _t: PhantomData::, + } } } -impl From for SampleBuilder { +impl From for SampleBuilder { fn from(sample: Sample) -> Self { - SampleBuilder(sample) + SampleBuilder { + sample, + _t: PhantomData::, + } } } -impl TryFrom for PutSampleBuilder { +impl TryFrom for SampleBuilder { type Error = zresult::Error; fn try_from(sample: Sample) -> Result { if sample.kind != SampleKind::Put { bail!("Sample is not a put sample") } - Ok(Self(SampleBuilder(sample))) + Ok(SampleBuilder { + sample, + _t: PhantomData::, + }) } } -impl TryFrom for DeleteSampleBuilder { +impl TryFrom for SampleBuilder { type Error = zresult::Error; fn try_from(sample: Sample) -> Result { if sample.kind != SampleKind::Delete { bail!("Sample is not a delete sample") } - Ok(Self(SampleBuilder(sample))) - } -} - -impl From for Sample { - fn from(sample_builder: SampleBuilder) -> Self { - sample_builder.0 - } -} - -impl From for Sample { - fn from(put_sample_builder: PutSampleBuilder) -> Self { - put_sample_builder.0 .0 + Ok(SampleBuilder { + sample, + _t: PhantomData::, + }) } } -impl From for Sample { - fn from(delete_sample_builder: DeleteSampleBuilder) -> Self { - delete_sample_builder.0 .0 +impl From> for Sample { + fn from(sample_builder: SampleBuilder) -> Self { + sample_builder.sample } } From 6c305a130043a66ee58f3985eb4f71eb708ff5dc Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 29 Mar 2024 18:35:40 +0100 Subject: [PATCH 085/357] Improve Query builders with generics --- zenoh/src/queryable.rs | 178 +++++++++++++----------------------- zenoh/src/sample/builder.rs | 65 ++++++------- 2 files changed, 99 insertions(+), 144 deletions(-) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 0e977f3def..fea148e6e6 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -19,7 +19,7 @@ use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; use crate::sample::builder::{ - OpDelete, OpPut, QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, TimestampBuilderTrait, + op, QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, }; use crate::sample::SourceInfo; @@ -132,18 +132,19 @@ impl Query { &self, key_expr: IntoKeyExpr, payload: IntoPayload, - ) -> ReplyBuilder<'_> + ) -> ReplySampleBuilder<'_, op::Put> where IntoKeyExpr: 
Into>, IntoPayload: Into, { let sample_builder = SampleBuilder::put(key_expr, payload).qos(response::ext::QoSType::RESPONSE.into()); - ReplyBuilder { + ReplySampleBuilder { query: self, sample_builder, } } + /// Sends a error reply to this Query. /// #[inline(always)] @@ -163,13 +164,16 @@ impl Query { /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]), /// replying on a disjoint key expression will result in an error when resolving the reply. #[inline(always)] - pub fn reply_del(&self, key_expr: IntoKeyExpr) -> ReplyDelBuilder<'_> + pub fn reply_del( + &self, + key_expr: IntoKeyExpr, + ) -> ReplySampleBuilder<'_, op::Delete> where IntoKeyExpr: Into>, { let sample_builder = SampleBuilder::delete(key_expr).qos(response::ext::QoSType::RESPONSE.into()); - ReplyDelBuilder { + ReplySampleBuilder { query: self, sample_builder, } @@ -240,13 +244,13 @@ impl AsyncResolve for ReplySample<'_> { /// A builder returned by [`Query::reply()`](Query::reply) #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] -pub struct ReplyBuilder<'a> { +pub struct ReplySampleBuilder<'a, T> { query: &'a Query, - sample_builder: SampleBuilder, + sample_builder: SampleBuilder, } -impl TimestampBuilderTrait for ReplyBuilder<'_> { - fn timestamp>>(self, timestamp: T) -> Self { +impl TimestampBuilderTrait for ReplySampleBuilder<'_, T> { + fn timestamp>>(self, timestamp: U) -> Self { Self { sample_builder: self.sample_builder.timestamp(timestamp), ..self @@ -254,7 +258,7 @@ impl TimestampBuilderTrait for ReplyBuilder<'_> { } } -impl SampleBuilderTrait for ReplyBuilder<'_> { +impl SampleBuilderTrait for ReplySampleBuilder<'_, T> { #[cfg(feature = "unstable")] fn source_info(self, source_info: SourceInfo) -> Self { Self { @@ -264,7 +268,7 @@ impl SampleBuilderTrait for ReplyBuilder<'_> { } #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: T) -> Self { + fn attachment>>(self, attachment: U) -> Self { Self { sample_builder: self.sample_builder.attachment(attachment), ..self @@ -272,7 +276,7 @@ impl SampleBuilderTrait for ReplyBuilder<'_> { } } -impl QoSBuilderTrait for ReplyBuilder<'_> { +impl QoSBuilderTrait for ReplySampleBuilder<'_, T> { fn congestion_control(self, congestion_control: CongestionControl) -> Self { Self { sample_builder: self.sample_builder.congestion_control(congestion_control), @@ -295,7 +299,7 @@ impl QoSBuilderTrait for ReplyBuilder<'_> { } } -impl ValueBuilderTrait for ReplyBuilder<'_> { +impl ValueBuilderTrait for ReplySampleBuilder<'_, op::Put> { fn encoding>(self, encoding: T) -> Self { Self { sample_builder: self.sample_builder.encoding(encoding), @@ -318,101 +322,86 @@ impl ValueBuilderTrait for ReplyBuilder<'_> { } } -/// A builder returned by [`Query::reply_del()`](Query::reply) -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -#[derive(Debug)] -pub struct ReplyDelBuilder<'a> { - query: &'a Query, - sample_builder: SampleBuilder, +impl<'a, T> Resolvable for ReplySampleBuilder<'a, T> { + type To = ZResult<()>; } -impl TimestampBuilderTrait for ReplyDelBuilder<'_> { - fn timestamp>>(self, timestamp: T) -> Self { - Self { - sample_builder: self.sample_builder.timestamp(timestamp), - ..self - } +impl SyncResolve for ReplySampleBuilder<'_, T> { + fn res_sync(self) -> ::To { + self.query._reply_sample(self.sample_builder.into()) } } -impl SampleBuilderTrait for 
ReplyDelBuilder<'_> { - #[cfg(feature = "unstable")] - fn source_info(self, source_info: SourceInfo) -> Self { - Self { - sample_builder: self.sample_builder.source_info(source_info), - ..self - } - } +impl<'a, T> AsyncResolve for ReplySampleBuilder<'a, T> { + type Future = Ready; - #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: T) -> Self { - Self { - sample_builder: self.sample_builder.attachment(attachment), - ..self - } + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) } } -impl QoSBuilderTrait for ReplyDelBuilder<'_> { - fn congestion_control(self, congestion_control: CongestionControl) -> Self { +/// A builder returned by [`Query::reply_err()`](Query::reply_err). +#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +#[derive(Debug)] +pub struct ReplyErrBuilder<'a> { + query: &'a Query, + value: Value, +} + +impl ValueBuilderTrait for ReplyErrBuilder<'_> { + fn encoding>(self, encoding: T) -> Self { Self { - sample_builder: self.sample_builder.congestion_control(congestion_control), + value: self.value.encoding(encoding), ..self } } - fn priority(self, priority: Priority) -> Self { + fn payload>(self, payload: T) -> Self { Self { - sample_builder: self.sample_builder.priority(priority), + value: self.value.payload(payload), ..self } } - fn express(self, is_express: bool) -> Self { + fn value>(self, value: T) -> Self { Self { - sample_builder: self.sample_builder.express(is_express), + value: value.into(), ..self } } } -/// A builder returned by [`Query::reply_err()`](Query::reply_err). -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -#[derive(Debug)] -pub struct ReplyErrBuilder<'a> { - query: &'a Query, - value: Value, -} - -impl<'a> Resolvable for ReplyBuilder<'a> { - type To = ZResult<()>; -} - -impl SyncResolve for ReplyBuilder<'_> { - fn res_sync(self) -> ::To { - self.query._reply_sample(self.sample_builder.into()) - } -} - -impl<'a> Resolvable for ReplyDelBuilder<'a> { +impl<'a> Resolvable for ReplyErrBuilder<'a> { type To = ZResult<()>; } -impl SyncResolve for ReplyDelBuilder<'_> { +impl SyncResolve for ReplyErrBuilder<'_> { fn res_sync(self) -> ::To { - self.query._reply_sample(self.sample_builder.into()) - } -} - -impl<'a> AsyncResolve for ReplyBuilder<'a> { - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + self.query.inner.primitives.send_response(Response { + rid: self.query.inner.qid, + wire_expr: WireExpr { + scope: 0, + suffix: std::borrow::Cow::Owned(self.query.key_expr().as_str().to_owned()), + mapping: Mapping::Sender, + }, + payload: ResponseBody::Err(zenoh::Err { + encoding: self.value.encoding.into(), + ext_sinfo: None, + ext_unknown: vec![], + payload: self.value.payload.into(), + }), + ext_qos: response::ext::QoSType::RESPONSE, + ext_tstamp: None, + ext_respid: Some(response::ext::ResponderIdType { + zid: self.query.inner.zid, + eid: self.query.eid, + }), + }); + Ok(()) } } -impl<'a> AsyncResolve for ReplyDelBuilder<'a> { +impl<'a> AsyncResolve for ReplyErrBuilder<'a> { type Future = Ready; fn res_async(self) -> Self::Future { @@ -477,43 +466,6 @@ impl Query { } } -impl<'a> Resolvable for ReplyErrBuilder<'a> { - type To = ZResult<()>; -} - -impl SyncResolve for ReplyErrBuilder<'_> { - fn res_sync(self) -> ::To { - self.query.inner.primitives.send_response(Response { - rid: self.query.inner.qid, - wire_expr: 
WireExpr { - scope: 0, - suffix: std::borrow::Cow::Owned(self.query.key_expr().as_str().to_owned()), - mapping: Mapping::Sender, - }, - payload: ResponseBody::Err(zenoh::Err { - encoding: self.value.encoding.into(), - ext_sinfo: None, - ext_unknown: vec![], - payload: self.value.payload.into(), - }), - ext_qos: response::ext::QoSType::RESPONSE, - ext_tstamp: None, - ext_respid: Some(response::ext::ResponderIdType { - zid: self.query.inner.zid, - eid: self.query.eid, - }), - }); - Ok(()) - } -} -impl<'a> AsyncResolve for ReplyErrBuilder<'a> { - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - pub(crate) struct QueryableState { pub(crate) id: Id, pub(crate) key_expr: WireExpr<'static>, diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index cae58514ff..1ec20209aa 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -64,12 +64,16 @@ pub trait ValueBuilderTrait { fn value>(self, value: T) -> Self; } -#[derive(Debug)] -pub struct OpPut; -#[derive(Debug)] -pub struct OpDelete; -#[derive(Debug)] -pub struct OpAny; +pub mod op { + #[derive(Debug)] + pub struct Put; + #[derive(Debug)] + pub struct Delete; + #[derive(Debug)] + pub struct Error; + #[derive(Debug)] + pub struct Any; +} #[derive(Debug)] pub struct SampleBuilder { @@ -77,11 +81,11 @@ pub struct SampleBuilder { _t: PhantomData, } -impl SampleBuilder { +impl SampleBuilder { pub fn put( key_expr: IntoKeyExpr, payload: IntoPayload, - ) -> SampleBuilder + ) -> SampleBuilder where IntoKeyExpr: Into>, IntoPayload: Into, @@ -99,13 +103,13 @@ impl SampleBuilder { #[cfg(feature = "unstable")] attachment: None, }, - _t: PhantomData::, + _t: PhantomData::, } } } -impl SampleBuilder { - pub fn delete(key_expr: IntoKeyExpr) -> SampleBuilder +impl SampleBuilder { + pub fn delete(key_expr: IntoKeyExpr) -> SampleBuilder where IntoKeyExpr: Into>, { @@ -122,7 +126,7 @@ impl SampleBuilder { #[cfg(feature = "unstable")] attachment: None, }, - _t: PhantomData::, + _t: PhantomData::, } } } @@ -141,6 +145,14 @@ impl SampleBuilder { _t: PhantomData::, } } + + // Allows to change qos as a whole of [`Sample`] + pub fn qos(self, qos: QoS) -> Self { + Self { + sample: Sample { qos, ..self.sample }, + _t: PhantomData::, + } + } } impl TimestampBuilderTrait for SampleBuilder { @@ -179,15 +191,6 @@ impl SampleBuilderTrait for SampleBuilder { } } -impl SampleBuilder { - pub fn qos(self, qos: QoS) -> Self { - Self { - sample: Sample { qos, ..self.sample }, - _t: PhantomData::, - } - } -} - impl QoSBuilderTrait for SampleBuilder { fn congestion_control(self, congestion_control: CongestionControl) -> Self { let qos: QoSBuilder = self.sample.qos.into(); @@ -215,14 +218,14 @@ impl QoSBuilderTrait for SampleBuilder { } } -impl ValueBuilderTrait for SampleBuilder { +impl ValueBuilderTrait for SampleBuilder { fn encoding>(self, encoding: T) -> Self { Self { sample: Sample { encoding: encoding.into(), ..self.sample }, - _t: PhantomData::, + _t: PhantomData::, } } fn payload>(self, payload: T) -> Self { @@ -231,7 +234,7 @@ impl ValueBuilderTrait for SampleBuilder { payload: payload.into(), ..self.sample }, - _t: PhantomData::, + _t: PhantomData::, } } fn value>(self, value: T) -> Self { @@ -242,21 +245,21 @@ impl ValueBuilderTrait for SampleBuilder { encoding, ..self.sample }, - _t: PhantomData::, + _t: PhantomData::, } } } -impl From for SampleBuilder { +impl From for SampleBuilder { fn from(sample: Sample) -> Self { SampleBuilder { sample, - _t: PhantomData::, + _t: 
PhantomData::, } } } -impl TryFrom for SampleBuilder { +impl TryFrom for SampleBuilder { type Error = zresult::Error; fn try_from(sample: Sample) -> Result { if sample.kind != SampleKind::Put { @@ -264,12 +267,12 @@ impl TryFrom for SampleBuilder { } Ok(SampleBuilder { sample, - _t: PhantomData::, + _t: PhantomData::, }) } } -impl TryFrom for SampleBuilder { +impl TryFrom for SampleBuilder { type Error = zresult::Error; fn try_from(sample: Sample) -> Result { if sample.kind != SampleKind::Delete { @@ -277,7 +280,7 @@ impl TryFrom for SampleBuilder { } Ok(SampleBuilder { sample, - _t: PhantomData::, + _t: PhantomData::, }) } } From bca953da3de684228241cbd1c8bc8641945b2b84 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 29 Mar 2024 18:36:58 +0100 Subject: [PATCH 086/357] Reorg sample files --- zenoh/src/sample.rs | 655 -------------------------------------------- 1 file changed, 655 deletions(-) delete mode 100644 zenoh/src/sample.rs diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs deleted file mode 100644 index 2b71105d5e..0000000000 --- a/zenoh/src/sample.rs +++ /dev/null @@ -1,655 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// - -//! Sample primitives -use crate::encoding::Encoding; -use crate::payload::Payload; -use crate::prelude::{KeyExpr, Value}; -use crate::sample::builder::{QoSBuilderTrait, ValueBuilderTrait}; -use crate::time::Timestamp; -use crate::Priority; -#[zenoh_macros::unstable] -use serde::Serialize; -use std::{convert::TryFrom, fmt}; -use zenoh_protocol::core::EntityGlobalId; -use zenoh_protocol::network::declare::ext::QoSType; -use zenoh_protocol::{core::CongestionControl, zenoh}; - -pub mod builder; - -pub type SourceSn = u64; - -/// The locality of samples to be received by subscribers or targeted by publishers. -#[zenoh_macros::unstable] -#[derive(Clone, Copy, Debug, Default, Serialize, PartialEq, Eq)] -pub enum Locality { - SessionLocal, - Remote, - #[default] - Any, -} -#[cfg(not(feature = "unstable"))] -#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] -pub(crate) enum Locality { - SessionLocal, - Remote, - #[default] - Any, -} - -#[derive(Debug, Clone, PartialEq, Eq, Default)] -pub(crate) struct DataInfo { - pub kind: SampleKind, - pub encoding: Option, - pub timestamp: Option, - pub source_id: Option, - pub source_sn: Option, - pub qos: QoS, -} - -pub(crate) trait DataInfoIntoSample { - fn into_sample( - self, - key_expr: IntoKeyExpr, - payload: IntoPayload, - #[cfg(feature = "unstable")] attachment: Option, - ) -> Sample - where - IntoKeyExpr: Into>, - IntoPayload: Into; -} - -impl DataInfoIntoSample for DataInfo { - // This function is for internal use only. - // Technically it may create invalid sample (e.g. a delete sample with a payload and encoding) - // The test for it is intentionally not added to avoid inserting extra "if" into hot path. - // The correctness of the data should be ensured by the caller. 
- #[inline] - fn into_sample( - self, - key_expr: IntoKeyExpr, - payload: IntoPayload, - #[cfg(feature = "unstable")] attachment: Option, - ) -> Sample - where - IntoKeyExpr: Into>, - IntoPayload: Into, - { - Sample { - key_expr: key_expr.into(), - payload: payload.into(), - kind: self.kind, - encoding: self.encoding.unwrap_or_default(), - timestamp: self.timestamp, - qos: self.qos, - #[cfg(feature = "unstable")] - source_info: SourceInfo { - source_id: self.source_id, - source_sn: self.source_sn, - }, - #[cfg(feature = "unstable")] - attachment, - } - } -} - -impl DataInfoIntoSample for Option { - #[inline] - fn into_sample( - self, - key_expr: IntoKeyExpr, - payload: IntoPayload, - #[cfg(feature = "unstable")] attachment: Option, - ) -> Sample - where - IntoKeyExpr: Into>, - IntoPayload: Into, - { - if let Some(data_info) = self { - data_info.into_sample(key_expr, payload, attachment) - } else { - Sample { - key_expr: key_expr.into(), - payload: payload.into(), - kind: SampleKind::Put, - encoding: Encoding::default(), - timestamp: None, - qos: QoS::default(), - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment, - } - } - } -} - -/// Informations on the source of a zenoh [`Sample`]. -#[zenoh_macros::unstable] -#[derive(Debug, Clone)] -pub struct SourceInfo { - /// The [`EntityGlobalId`] of the zenoh entity that published the concerned [`Sample`]. - pub source_id: Option, - /// The sequence number of the [`Sample`] from the source. - pub source_sn: Option, -} - -#[test] -#[cfg(feature = "unstable")] -fn source_info_stack_size() { - use crate::{ - sample::{SourceInfo, SourceSn}, - ZenohId, - }; - - assert_eq!(std::mem::size_of::(), 16); - assert_eq!(std::mem::size_of::>(), 17); - assert_eq!(std::mem::size_of::>(), 16); - assert_eq!(std::mem::size_of::(), 17 + 16 + 7); -} - -#[zenoh_macros::unstable] -impl SourceInfo { - pub(crate) fn empty() -> Self { - SourceInfo { - source_id: None, - source_sn: None, - } - } - pub(crate) fn is_empty(&self) -> bool { - self.source_id.is_none() && self.source_sn.is_none() - } -} - -impl From for Option { - fn from(source_info: SourceInfo) -> Option { - if source_info.is_empty() { - None - } else { - Some(zenoh::put::ext::SourceInfoType { - id: source_info.source_id.unwrap_or_default(), - sn: source_info.source_sn.unwrap_or_default() as u32, - }) - } - } -} - -#[zenoh_macros::unstable] -impl From for SourceInfo { - fn from(data_info: DataInfo) -> Self { - SourceInfo { - source_id: data_info.source_id, - source_sn: data_info.source_sn, - } - } -} - -#[zenoh_macros::unstable] -impl From> for SourceInfo { - fn from(data_info: Option) -> Self { - match data_info { - Some(data_info) => data_info.into(), - None => SourceInfo::empty(), - } - } -} - -mod attachment { - #[zenoh_macros::unstable] - use zenoh_buffers::{ - reader::{HasReader, Reader}, - writer::HasWriter, - ZBuf, ZBufReader, ZSlice, - }; - #[zenoh_macros::unstable] - use zenoh_codec::{RCodec, WCodec, Zenoh080}; - #[zenoh_macros::unstable] - use zenoh_protocol::zenoh::ext::AttachmentType; - - /// A builder for [`Attachment`] - #[zenoh_macros::unstable] - #[derive(Debug)] - pub struct AttachmentBuilder { - pub(crate) inner: Vec, - } - #[zenoh_macros::unstable] - impl Default for AttachmentBuilder { - fn default() -> Self { - Self::new() - } - } - #[zenoh_macros::unstable] - impl AttachmentBuilder { - pub fn new() -> Self { - Self { inner: Vec::new() } - } - fn _insert(&mut self, key: &[u8], value: &[u8]) { - let codec = Zenoh080; - let mut writer 
= self.inner.writer(); - codec.write(&mut writer, key).unwrap(); // Infallible, barring alloc failure - codec.write(&mut writer, value).unwrap(); // Infallible, barring alloc failure - } - /// Inserts a key-value pair to the attachment. - /// - /// Note that [`Attachment`] is a list of non-unique key-value pairs: inserting at the same key multiple times leads to both values being transmitted for that key. - pub fn insert + ?Sized, Value: AsRef<[u8]> + ?Sized>( - &mut self, - key: &Key, - value: &Value, - ) { - self._insert(key.as_ref(), value.as_ref()) - } - pub fn build(self) -> Attachment { - Attachment { - inner: self.inner.into(), - } - } - } - #[zenoh_macros::unstable] - impl From for Attachment { - fn from(value: AttachmentBuilder) -> Self { - Attachment { - inner: value.inner.into(), - } - } - } - #[zenoh_macros::unstable] - impl From for Option { - fn from(value: AttachmentBuilder) -> Self { - if value.inner.is_empty() { - None - } else { - Some(value.into()) - } - } - } - - #[zenoh_macros::unstable] - #[derive(Clone)] - pub struct Attachment { - pub(crate) inner: ZBuf, - } - #[zenoh_macros::unstable] - impl Default for Attachment { - fn default() -> Self { - Self::new() - } - } - #[zenoh_macros::unstable] - impl From for AttachmentType { - fn from(this: Attachment) -> Self { - AttachmentType { buffer: this.inner } - } - } - #[zenoh_macros::unstable] - impl From> for Attachment { - fn from(this: AttachmentType) -> Self { - Attachment { inner: this.buffer } - } - } - #[zenoh_macros::unstable] - impl Attachment { - pub fn new() -> Self { - Self { - inner: ZBuf::empty(), - } - } - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - pub fn len(&self) -> usize { - self.iter().count() - } - pub fn iter(&self) -> AttachmentIterator { - self.into_iter() - } - fn _get(&self, key: &[u8]) -> Option { - self.iter() - .find_map(|(k, v)| (k.as_slice() == key).then_some(v)) - } - pub fn get>(&self, key: &Key) -> Option { - self._get(key.as_ref()) - } - fn _insert(&mut self, key: &[u8], value: &[u8]) { - let codec = Zenoh080; - let mut writer = self.inner.writer(); - codec.write(&mut writer, key).unwrap(); // Infallible, barring alloc failure - codec.write(&mut writer, value).unwrap(); // Infallible, barring alloc failure - } - /// Inserts a key-value pair to the attachment. - /// - /// Note that [`Attachment`] is a list of non-unique key-value pairs: inserting at the same key multiple times leads to both values being transmitted for that key. 
- /// - /// [`Attachment`] is not very efficient at inserting, so if you wish to perform multiple inserts, it's generally better to [`Attachment::extend`] after performing the inserts on an [`AttachmentBuilder`] - pub fn insert + ?Sized, Value: AsRef<[u8]> + ?Sized>( - &mut self, - key: &Key, - value: &Value, - ) { - self._insert(key.as_ref(), value.as_ref()) - } - fn _extend(&mut self, with: Self) -> &mut Self { - for slice in with.inner.zslices().cloned() { - self.inner.push_zslice(slice); - } - self - } - pub fn extend(&mut self, with: impl Into) -> &mut Self { - let with = with.into(); - self._extend(with) - } - } - #[zenoh_macros::unstable] - pub struct AttachmentIterator<'a> { - reader: ZBufReader<'a>, - } - #[zenoh_macros::unstable] - impl<'a> core::iter::IntoIterator for &'a Attachment { - type Item = (ZSlice, ZSlice); - type IntoIter = AttachmentIterator<'a>; - fn into_iter(self) -> Self::IntoIter { - AttachmentIterator { - reader: self.inner.reader(), - } - } - } - #[zenoh_macros::unstable] - impl core::fmt::Debug for Attachment { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{{")?; - for (key, value) in self { - let key = key.as_slice(); - let value = value.as_slice(); - match core::str::from_utf8(key) { - Ok(key) => write!(f, "\"{key}\": ")?, - Err(_) => { - write!(f, "0x")?; - for byte in key { - write!(f, "{byte:02X}")? - } - } - } - match core::str::from_utf8(value) { - Ok(value) => write!(f, "\"{value}\", ")?, - Err(_) => { - write!(f, "0x")?; - for byte in value { - write!(f, "{byte:02X}")? - } - write!(f, ", ")? - } - } - } - write!(f, "}}") - } - } - #[zenoh_macros::unstable] - impl<'a> core::iter::Iterator for AttachmentIterator<'a> { - type Item = (ZSlice, ZSlice); - fn next(&mut self) -> Option { - let key = Zenoh080.read(&mut self.reader).ok()?; - let value = Zenoh080.read(&mut self.reader).ok()?; - Some((key, value)) - } - fn size_hint(&self) -> (usize, Option) { - ( - (self.reader.remaining() != 0) as usize, - Some(self.reader.remaining() / 2), - ) - } - } - #[zenoh_macros::unstable] - impl<'a> core::iter::FromIterator<(&'a [u8], &'a [u8])> for AttachmentBuilder { - fn from_iter>(iter: T) -> Self { - let codec = Zenoh080; - let mut buffer: Vec = Vec::new(); - let mut writer = buffer.writer(); - for (key, value) in iter { - codec.write(&mut writer, key).unwrap(); // Infallible, barring allocation failures - codec.write(&mut writer, value).unwrap(); // Infallible, barring allocation failures - } - Self { inner: buffer } - } - } - #[zenoh_macros::unstable] - impl<'a> core::iter::FromIterator<(&'a [u8], &'a [u8])> for Attachment { - fn from_iter>(iter: T) -> Self { - AttachmentBuilder::from_iter(iter).into() - } - } -} - -/// The kind of a `Sample`. -#[repr(u8)] -#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)] -pub enum SampleKind { - /// if the `Sample` was issued by a `put` operation. - #[default] - Put = 0, - /// if the `Sample` was issued by a `delete` operation. - Delete = 1, -} - -impl fmt::Display for SampleKind { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - SampleKind::Put => write!(f, "PUT"), - SampleKind::Delete => write!(f, "DELETE"), - } - } -} - -impl TryFrom for SampleKind { - type Error = u64; - fn try_from(kind: u64) -> Result { - match kind { - 0 => Ok(SampleKind::Put), - 1 => Ok(SampleKind::Delete), - _ => Err(kind), - } - } -} - -#[zenoh_macros::unstable] -pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; - -/// Structure with public fields for sample. 
It's convenient if it's necessary to decompose a sample into its fields. -pub struct SampleFields { - pub key_expr: KeyExpr<'static>, - pub payload: Payload, - pub kind: SampleKind, - pub encoding: Encoding, - pub timestamp: Option, - pub express: bool, - pub priority: Priority, - pub congestion_control: CongestionControl, - #[cfg(feature = "unstable")] - pub source_info: SourceInfo, - #[cfg(feature = "unstable")] - pub attachment: Option, -} - -impl From for SampleFields { - fn from(sample: Sample) -> Self { - SampleFields { - key_expr: sample.key_expr, - payload: sample.payload, - kind: sample.kind, - encoding: sample.encoding, - timestamp: sample.timestamp, - express: sample.qos.express(), - priority: sample.qos.priority(), - congestion_control: sample.qos.congestion_control(), - #[cfg(feature = "unstable")] - source_info: sample.source_info, - #[cfg(feature = "unstable")] - attachment: sample.attachment, - } - } -} - -/// A zenoh sample. -#[non_exhaustive] -#[derive(Clone, Debug)] -pub struct Sample { - pub(crate) key_expr: KeyExpr<'static>, - pub(crate) payload: Payload, - pub(crate) kind: SampleKind, - pub(crate) encoding: Encoding, - pub(crate) timestamp: Option, - pub(crate) qos: QoS, - - #[cfg(feature = "unstable")] - pub(crate) source_info: SourceInfo, - - #[cfg(feature = "unstable")] - pub(crate) attachment: Option, -} - -impl Sample { - /// Gets the key expression on which this Sample was published. - #[inline] - pub fn key_expr(&self) -> &KeyExpr<'static> { - &self.key_expr - } - - /// Gets the payload of this Sample. - #[inline] - pub fn payload(&self) -> &Payload { - &self.payload - } - - /// Gets the kind of this Sample. - #[inline] - pub fn kind(&self) -> SampleKind { - self.kind - } - - /// Gets the encoding of this sample - #[inline] - pub fn encoding(&self) -> &Encoding { - &self.encoding - } - - /// Gets the timestamp of this Sample. - #[inline] - pub fn timestamp(&self) -> Option<&Timestamp> { - self.timestamp.as_ref() - } - - /// Gets the quality of service settings this Sample was sent with. - #[inline] - pub fn qos(&self) -> &QoS { - &self.qos - } - - /// Gets infos on the source of this Sample. - #[zenoh_macros::unstable] - #[inline] - pub fn source_info(&self) -> &SourceInfo { - &self.source_info - } - - /// Gets the sample attachment: a map of key-value pairs, where each key and value are byte-slices. 
- #[zenoh_macros::unstable] - #[inline] - pub fn attachment(&self) -> Option<&Attachment> { - self.attachment.as_ref() - } -} - -impl From for Value { - fn from(sample: Sample) -> Self { - Value::new(sample.payload).encoding(sample.encoding) - } -} - -/// Structure containing quality of service data -#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)] -pub struct QoS { - inner: QoSType, -} - -#[derive(Debug)] -pub struct QoSBuilder(QoS); - -impl From for QoSBuilder { - fn from(qos: QoS) -> Self { - QoSBuilder(qos) - } -} - -impl From for QoS { - fn from(builder: QoSBuilder) -> Self { - builder.0 - } -} - -impl QoSBuilderTrait for QoSBuilder { - fn congestion_control(self, congestion_control: CongestionControl) -> Self { - let mut inner = self.0.inner; - inner.set_congestion_control(congestion_control); - Self(QoS { inner }) - } - - fn priority(self, priority: Priority) -> Self { - let mut inner = self.0.inner; - inner.set_priority(priority.into()); - Self(QoS { inner }) - } - - fn express(self, is_express: bool) -> Self { - let mut inner = self.0.inner; - inner.set_is_express(is_express); - Self(QoS { inner }) - } -} - -impl QoS { - /// Gets priority of the message. - pub fn priority(&self) -> Priority { - match Priority::try_from(self.inner.get_priority()) { - Ok(p) => p, - Err(e) => { - log::trace!( - "Failed to convert priority: {}; replacing with default value", - e.to_string() - ); - Priority::default() - } - } - } - - /// Gets congestion control of the message. - pub fn congestion_control(&self) -> CongestionControl { - self.inner.get_congestion_control() - } - - /// Gets express flag value. If `true`, the message is not batched during transmission, in order to reduce latency. - pub fn express(&self) -> bool { - self.inner.is_express() - } -} - -impl From for QoS { - fn from(qos: QoSType) -> Self { - QoS { inner: qos } - } -} - -impl From for QoSType { - fn from(qos: QoS) -> Self { - qos.inner - } -} From 9d1a5409541831926e70420fdf89006a67b1020c Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 29 Mar 2024 18:37:23 +0100 Subject: [PATCH 087/357] Remove error op struct in SampleBuilder --- zenoh/src/sample/builder.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index 1ec20209aa..7f438d1381 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -70,8 +70,6 @@ pub mod op { #[derive(Debug)] pub struct Delete; #[derive(Debug)] - pub struct Error; - #[derive(Debug)] pub struct Any; } From 7904d099ba3d069ecc51b76241ef136678a5e005 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 29 Mar 2024 20:29:43 +0100 Subject: [PATCH 088/357] Add forgotten file --- zenoh/src/sample/mod.rs | 655 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 655 insertions(+) create mode 100644 zenoh/src/sample/mod.rs diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/sample/mod.rs new file mode 100644 index 0000000000..2b71105d5e --- /dev/null +++ b/zenoh/src/sample/mod.rs @@ -0,0 +1,655 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +//! 
Sample primitives +use crate::encoding::Encoding; +use crate::payload::Payload; +use crate::prelude::{KeyExpr, Value}; +use crate::sample::builder::{QoSBuilderTrait, ValueBuilderTrait}; +use crate::time::Timestamp; +use crate::Priority; +#[zenoh_macros::unstable] +use serde::Serialize; +use std::{convert::TryFrom, fmt}; +use zenoh_protocol::core::EntityGlobalId; +use zenoh_protocol::network::declare::ext::QoSType; +use zenoh_protocol::{core::CongestionControl, zenoh}; + +pub mod builder; + +pub type SourceSn = u64; + +/// The locality of samples to be received by subscribers or targeted by publishers. +#[zenoh_macros::unstable] +#[derive(Clone, Copy, Debug, Default, Serialize, PartialEq, Eq)] +pub enum Locality { + SessionLocal, + Remote, + #[default] + Any, +} +#[cfg(not(feature = "unstable"))] +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] +pub(crate) enum Locality { + SessionLocal, + Remote, + #[default] + Any, +} + +#[derive(Debug, Clone, PartialEq, Eq, Default)] +pub(crate) struct DataInfo { + pub kind: SampleKind, + pub encoding: Option, + pub timestamp: Option, + pub source_id: Option, + pub source_sn: Option, + pub qos: QoS, +} + +pub(crate) trait DataInfoIntoSample { + fn into_sample( + self, + key_expr: IntoKeyExpr, + payload: IntoPayload, + #[cfg(feature = "unstable")] attachment: Option, + ) -> Sample + where + IntoKeyExpr: Into>, + IntoPayload: Into; +} + +impl DataInfoIntoSample for DataInfo { + // This function is for internal use only. + // Technically it may create invalid sample (e.g. a delete sample with a payload and encoding) + // The test for it is intentionally not added to avoid inserting extra "if" into hot path. + // The correctness of the data should be ensured by the caller. + #[inline] + fn into_sample( + self, + key_expr: IntoKeyExpr, + payload: IntoPayload, + #[cfg(feature = "unstable")] attachment: Option, + ) -> Sample + where + IntoKeyExpr: Into>, + IntoPayload: Into, + { + Sample { + key_expr: key_expr.into(), + payload: payload.into(), + kind: self.kind, + encoding: self.encoding.unwrap_or_default(), + timestamp: self.timestamp, + qos: self.qos, + #[cfg(feature = "unstable")] + source_info: SourceInfo { + source_id: self.source_id, + source_sn: self.source_sn, + }, + #[cfg(feature = "unstable")] + attachment, + } + } +} + +impl DataInfoIntoSample for Option { + #[inline] + fn into_sample( + self, + key_expr: IntoKeyExpr, + payload: IntoPayload, + #[cfg(feature = "unstable")] attachment: Option, + ) -> Sample + where + IntoKeyExpr: Into>, + IntoPayload: Into, + { + if let Some(data_info) = self { + data_info.into_sample(key_expr, payload, attachment) + } else { + Sample { + key_expr: key_expr.into(), + payload: payload.into(), + kind: SampleKind::Put, + encoding: Encoding::default(), + timestamp: None, + qos: QoS::default(), + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment, + } + } + } +} + +/// Informations on the source of a zenoh [`Sample`]. +#[zenoh_macros::unstable] +#[derive(Debug, Clone)] +pub struct SourceInfo { + /// The [`EntityGlobalId`] of the zenoh entity that published the concerned [`Sample`]. + pub source_id: Option, + /// The sequence number of the [`Sample`] from the source. 
+ pub source_sn: Option, +} + +#[test] +#[cfg(feature = "unstable")] +fn source_info_stack_size() { + use crate::{ + sample::{SourceInfo, SourceSn}, + ZenohId, + }; + + assert_eq!(std::mem::size_of::(), 16); + assert_eq!(std::mem::size_of::>(), 17); + assert_eq!(std::mem::size_of::>(), 16); + assert_eq!(std::mem::size_of::(), 17 + 16 + 7); +} + +#[zenoh_macros::unstable] +impl SourceInfo { + pub(crate) fn empty() -> Self { + SourceInfo { + source_id: None, + source_sn: None, + } + } + pub(crate) fn is_empty(&self) -> bool { + self.source_id.is_none() && self.source_sn.is_none() + } +} + +impl From for Option { + fn from(source_info: SourceInfo) -> Option { + if source_info.is_empty() { + None + } else { + Some(zenoh::put::ext::SourceInfoType { + id: source_info.source_id.unwrap_or_default(), + sn: source_info.source_sn.unwrap_or_default() as u32, + }) + } + } +} + +#[zenoh_macros::unstable] +impl From for SourceInfo { + fn from(data_info: DataInfo) -> Self { + SourceInfo { + source_id: data_info.source_id, + source_sn: data_info.source_sn, + } + } +} + +#[zenoh_macros::unstable] +impl From> for SourceInfo { + fn from(data_info: Option) -> Self { + match data_info { + Some(data_info) => data_info.into(), + None => SourceInfo::empty(), + } + } +} + +mod attachment { + #[zenoh_macros::unstable] + use zenoh_buffers::{ + reader::{HasReader, Reader}, + writer::HasWriter, + ZBuf, ZBufReader, ZSlice, + }; + #[zenoh_macros::unstable] + use zenoh_codec::{RCodec, WCodec, Zenoh080}; + #[zenoh_macros::unstable] + use zenoh_protocol::zenoh::ext::AttachmentType; + + /// A builder for [`Attachment`] + #[zenoh_macros::unstable] + #[derive(Debug)] + pub struct AttachmentBuilder { + pub(crate) inner: Vec, + } + #[zenoh_macros::unstable] + impl Default for AttachmentBuilder { + fn default() -> Self { + Self::new() + } + } + #[zenoh_macros::unstable] + impl AttachmentBuilder { + pub fn new() -> Self { + Self { inner: Vec::new() } + } + fn _insert(&mut self, key: &[u8], value: &[u8]) { + let codec = Zenoh080; + let mut writer = self.inner.writer(); + codec.write(&mut writer, key).unwrap(); // Infallible, barring alloc failure + codec.write(&mut writer, value).unwrap(); // Infallible, barring alloc failure + } + /// Inserts a key-value pair to the attachment. + /// + /// Note that [`Attachment`] is a list of non-unique key-value pairs: inserting at the same key multiple times leads to both values being transmitted for that key. 
+ pub fn insert + ?Sized, Value: AsRef<[u8]> + ?Sized>( + &mut self, + key: &Key, + value: &Value, + ) { + self._insert(key.as_ref(), value.as_ref()) + } + pub fn build(self) -> Attachment { + Attachment { + inner: self.inner.into(), + } + } + } + #[zenoh_macros::unstable] + impl From for Attachment { + fn from(value: AttachmentBuilder) -> Self { + Attachment { + inner: value.inner.into(), + } + } + } + #[zenoh_macros::unstable] + impl From for Option { + fn from(value: AttachmentBuilder) -> Self { + if value.inner.is_empty() { + None + } else { + Some(value.into()) + } + } + } + + #[zenoh_macros::unstable] + #[derive(Clone)] + pub struct Attachment { + pub(crate) inner: ZBuf, + } + #[zenoh_macros::unstable] + impl Default for Attachment { + fn default() -> Self { + Self::new() + } + } + #[zenoh_macros::unstable] + impl From for AttachmentType { + fn from(this: Attachment) -> Self { + AttachmentType { buffer: this.inner } + } + } + #[zenoh_macros::unstable] + impl From> for Attachment { + fn from(this: AttachmentType) -> Self { + Attachment { inner: this.buffer } + } + } + #[zenoh_macros::unstable] + impl Attachment { + pub fn new() -> Self { + Self { + inner: ZBuf::empty(), + } + } + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + pub fn len(&self) -> usize { + self.iter().count() + } + pub fn iter(&self) -> AttachmentIterator { + self.into_iter() + } + fn _get(&self, key: &[u8]) -> Option { + self.iter() + .find_map(|(k, v)| (k.as_slice() == key).then_some(v)) + } + pub fn get>(&self, key: &Key) -> Option { + self._get(key.as_ref()) + } + fn _insert(&mut self, key: &[u8], value: &[u8]) { + let codec = Zenoh080; + let mut writer = self.inner.writer(); + codec.write(&mut writer, key).unwrap(); // Infallible, barring alloc failure + codec.write(&mut writer, value).unwrap(); // Infallible, barring alloc failure + } + /// Inserts a key-value pair to the attachment. + /// + /// Note that [`Attachment`] is a list of non-unique key-value pairs: inserting at the same key multiple times leads to both values being transmitted for that key. + /// + /// [`Attachment`] is not very efficient at inserting, so if you wish to perform multiple inserts, it's generally better to [`Attachment::extend`] after performing the inserts on an [`AttachmentBuilder`] + pub fn insert + ?Sized, Value: AsRef<[u8]> + ?Sized>( + &mut self, + key: &Key, + value: &Value, + ) { + self._insert(key.as_ref(), value.as_ref()) + } + fn _extend(&mut self, with: Self) -> &mut Self { + for slice in with.inner.zslices().cloned() { + self.inner.push_zslice(slice); + } + self + } + pub fn extend(&mut self, with: impl Into) -> &mut Self { + let with = with.into(); + self._extend(with) + } + } + #[zenoh_macros::unstable] + pub struct AttachmentIterator<'a> { + reader: ZBufReader<'a>, + } + #[zenoh_macros::unstable] + impl<'a> core::iter::IntoIterator for &'a Attachment { + type Item = (ZSlice, ZSlice); + type IntoIter = AttachmentIterator<'a>; + fn into_iter(self) -> Self::IntoIter { + AttachmentIterator { + reader: self.inner.reader(), + } + } + } + #[zenoh_macros::unstable] + impl core::fmt::Debug for Attachment { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{{")?; + for (key, value) in self { + let key = key.as_slice(); + let value = value.as_slice(); + match core::str::from_utf8(key) { + Ok(key) => write!(f, "\"{key}\": ")?, + Err(_) => { + write!(f, "0x")?; + for byte in key { + write!(f, "{byte:02X}")? 
+ } + } + } + match core::str::from_utf8(value) { + Ok(value) => write!(f, "\"{value}\", ")?, + Err(_) => { + write!(f, "0x")?; + for byte in value { + write!(f, "{byte:02X}")? + } + write!(f, ", ")? + } + } + } + write!(f, "}}") + } + } + #[zenoh_macros::unstable] + impl<'a> core::iter::Iterator for AttachmentIterator<'a> { + type Item = (ZSlice, ZSlice); + fn next(&mut self) -> Option { + let key = Zenoh080.read(&mut self.reader).ok()?; + let value = Zenoh080.read(&mut self.reader).ok()?; + Some((key, value)) + } + fn size_hint(&self) -> (usize, Option) { + ( + (self.reader.remaining() != 0) as usize, + Some(self.reader.remaining() / 2), + ) + } + } + #[zenoh_macros::unstable] + impl<'a> core::iter::FromIterator<(&'a [u8], &'a [u8])> for AttachmentBuilder { + fn from_iter>(iter: T) -> Self { + let codec = Zenoh080; + let mut buffer: Vec = Vec::new(); + let mut writer = buffer.writer(); + for (key, value) in iter { + codec.write(&mut writer, key).unwrap(); // Infallible, barring allocation failures + codec.write(&mut writer, value).unwrap(); // Infallible, barring allocation failures + } + Self { inner: buffer } + } + } + #[zenoh_macros::unstable] + impl<'a> core::iter::FromIterator<(&'a [u8], &'a [u8])> for Attachment { + fn from_iter>(iter: T) -> Self { + AttachmentBuilder::from_iter(iter).into() + } + } +} + +/// The kind of a `Sample`. +#[repr(u8)] +#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)] +pub enum SampleKind { + /// if the `Sample` was issued by a `put` operation. + #[default] + Put = 0, + /// if the `Sample` was issued by a `delete` operation. + Delete = 1, +} + +impl fmt::Display for SampleKind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + SampleKind::Put => write!(f, "PUT"), + SampleKind::Delete => write!(f, "DELETE"), + } + } +} + +impl TryFrom for SampleKind { + type Error = u64; + fn try_from(kind: u64) -> Result { + match kind { + 0 => Ok(SampleKind::Put), + 1 => Ok(SampleKind::Delete), + _ => Err(kind), + } + } +} + +#[zenoh_macros::unstable] +pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; + +/// Structure with public fields for sample. It's convenient if it's necessary to decompose a sample into its fields. +pub struct SampleFields { + pub key_expr: KeyExpr<'static>, + pub payload: Payload, + pub kind: SampleKind, + pub encoding: Encoding, + pub timestamp: Option, + pub express: bool, + pub priority: Priority, + pub congestion_control: CongestionControl, + #[cfg(feature = "unstable")] + pub source_info: SourceInfo, + #[cfg(feature = "unstable")] + pub attachment: Option, +} + +impl From for SampleFields { + fn from(sample: Sample) -> Self { + SampleFields { + key_expr: sample.key_expr, + payload: sample.payload, + kind: sample.kind, + encoding: sample.encoding, + timestamp: sample.timestamp, + express: sample.qos.express(), + priority: sample.qos.priority(), + congestion_control: sample.qos.congestion_control(), + #[cfg(feature = "unstable")] + source_info: sample.source_info, + #[cfg(feature = "unstable")] + attachment: sample.attachment, + } + } +} + +/// A zenoh sample. 
+#[non_exhaustive] +#[derive(Clone, Debug)] +pub struct Sample { + pub(crate) key_expr: KeyExpr<'static>, + pub(crate) payload: Payload, + pub(crate) kind: SampleKind, + pub(crate) encoding: Encoding, + pub(crate) timestamp: Option, + pub(crate) qos: QoS, + + #[cfg(feature = "unstable")] + pub(crate) source_info: SourceInfo, + + #[cfg(feature = "unstable")] + pub(crate) attachment: Option, +} + +impl Sample { + /// Gets the key expression on which this Sample was published. + #[inline] + pub fn key_expr(&self) -> &KeyExpr<'static> { + &self.key_expr + } + + /// Gets the payload of this Sample. + #[inline] + pub fn payload(&self) -> &Payload { + &self.payload + } + + /// Gets the kind of this Sample. + #[inline] + pub fn kind(&self) -> SampleKind { + self.kind + } + + /// Gets the encoding of this sample + #[inline] + pub fn encoding(&self) -> &Encoding { + &self.encoding + } + + /// Gets the timestamp of this Sample. + #[inline] + pub fn timestamp(&self) -> Option<&Timestamp> { + self.timestamp.as_ref() + } + + /// Gets the quality of service settings this Sample was sent with. + #[inline] + pub fn qos(&self) -> &QoS { + &self.qos + } + + /// Gets infos on the source of this Sample. + #[zenoh_macros::unstable] + #[inline] + pub fn source_info(&self) -> &SourceInfo { + &self.source_info + } + + /// Gets the sample attachment: a map of key-value pairs, where each key and value are byte-slices. + #[zenoh_macros::unstable] + #[inline] + pub fn attachment(&self) -> Option<&Attachment> { + self.attachment.as_ref() + } +} + +impl From for Value { + fn from(sample: Sample) -> Self { + Value::new(sample.payload).encoding(sample.encoding) + } +} + +/// Structure containing quality of service data +#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)] +pub struct QoS { + inner: QoSType, +} + +#[derive(Debug)] +pub struct QoSBuilder(QoS); + +impl From for QoSBuilder { + fn from(qos: QoS) -> Self { + QoSBuilder(qos) + } +} + +impl From for QoS { + fn from(builder: QoSBuilder) -> Self { + builder.0 + } +} + +impl QoSBuilderTrait for QoSBuilder { + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + let mut inner = self.0.inner; + inner.set_congestion_control(congestion_control); + Self(QoS { inner }) + } + + fn priority(self, priority: Priority) -> Self { + let mut inner = self.0.inner; + inner.set_priority(priority.into()); + Self(QoS { inner }) + } + + fn express(self, is_express: bool) -> Self { + let mut inner = self.0.inner; + inner.set_is_express(is_express); + Self(QoS { inner }) + } +} + +impl QoS { + /// Gets priority of the message. + pub fn priority(&self) -> Priority { + match Priority::try_from(self.inner.get_priority()) { + Ok(p) => p, + Err(e) => { + log::trace!( + "Failed to convert priority: {}; replacing with default value", + e.to_string() + ); + Priority::default() + } + } + } + + /// Gets congestion control of the message. + pub fn congestion_control(&self) -> CongestionControl { + self.inner.get_congestion_control() + } + + /// Gets express flag value. If `true`, the message is not batched during transmission, in order to reduce latency. 
+ pub fn express(&self) -> bool { + self.inner.is_express() + } +} + +impl From for QoS { + fn from(qos: QoSType) -> Self { + QoS { inner: qos } + } +} + +impl From for QoSType { + fn from(qos: QoS) -> Self { + qos.inner + } +} From ab349b2e91ee2fce1b0776526f6bb26af26a3b76 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 1 Apr 2024 11:46:29 +0200 Subject: [PATCH 089/357] support of TryIntoKeyexpr --- zenoh/src/key_expr.rs | 2 +- zenoh/src/queryable.rs | 144 ++++++++++++++++++++++++++++------------ zenoh/src/sample/mod.rs | 6 ++ 3 files changed, 107 insertions(+), 45 deletions(-) diff --git a/zenoh/src/key_expr.rs b/zenoh/src/key_expr.rs index aaa1d13724..d2bfb5bcfe 100644 --- a/zenoh/src/key_expr.rs +++ b/zenoh/src/key_expr.rs @@ -185,7 +185,7 @@ impl<'a> KeyExpr<'a> { /// # Safety /// Key Expressions must follow some rules to be accepted by a Zenoh network. /// Messages addressed with invalid key expressions will be dropped. - pub unsafe fn from_str_uncheckend(s: &'a str) -> Self { + pub unsafe fn from_str_unchecked(s: &'a str) -> Self { keyexpr::from_str_unchecked(s).into() } diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index c2a5557440..37c3a2303a 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -19,10 +19,9 @@ use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; use crate::sample::builder::{ - op, QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, TimestampBuilderTrait, - ValueBuilderTrait, + QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, }; -use crate::sample::SourceInfo; +use crate::sample::{QoSBuilder, SourceInfo}; use crate::Id; use crate::SessionRef; use crate::Undeclarable; @@ -132,17 +131,23 @@ impl Query { &self, key_expr: TryIntoKeyExpr, payload: IntoPayload, - ) -> ReplySampleBuilder<'_, op::Put> + ) -> ReplyBuilder<'_, 'b, ReplyBuilderPut> where TryIntoKeyExpr: TryInto>, >>::Error: Into, IntoPayload: Into, { - let sample_builder = - SampleBuilder::put(key_expr, payload).qos(response::ext::QoSType::RESPONSE.into()); - ReplySampleBuilder { + ReplyBuilder { query: self, - sample_builder, + key_expr: key_expr.try_into().map_err(Into::into), + qos: response::ext::QoSType::RESPONSE.into(), + kind: ReplyBuilderPut { + payload: payload.into(), + encoding: Encoding::default(), + }, + timestamp: None, + source_info: SourceInfo::empty(), + attachment: None, } } @@ -165,19 +170,22 @@ impl Query { /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]), /// replying on a disjoint key expression will result in an error when resolving the reply. 
#[inline(always)] - pub fn reply_del( + pub fn reply_del<'b, TryIntoKeyExpr>( &self, - key_expr: IntoKeyExpr, - ) -> ReplySampleBuilder<'_, op::Delete> + key_expr: TryIntoKeyExpr, + ) -> ReplyBuilder<'_, 'b, ReplyBuilderDelete> where TryIntoKeyExpr: TryInto>, >>::Error: Into, { - let sample_builder = - SampleBuilder::delete(key_expr).qos(response::ext::QoSType::RESPONSE.into()); - ReplySampleBuilder { + ReplyBuilder { query: self, - sample_builder, + key_expr: key_expr.try_into().map_err(Into::into), + qos: response::ext::QoSType::RESPONSE.into(), + kind: ReplyBuilderDelete, + timestamp: None, + source_info: SourceInfo::empty(), + attachment: None, } } @@ -243,28 +251,45 @@ impl AsyncResolve for ReplySample<'_> { } } -/// A builder returned by [`Query::reply()`](Query::reply) +#[derive(Debug)] +pub struct ReplyBuilderPut { + payload: super::Payload, + encoding: super::Encoding, +} +#[derive(Debug)] +pub struct ReplyBuilderDelete; + +/// A builder returned by [`Query::reply()`](Query::reply) and [`Query::reply_del()`](Query::reply_del) #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] -pub struct ReplySampleBuilder<'a, T> { +pub struct ReplyBuilder<'a, 'b, T> { query: &'a Query, - sample_builder: SampleBuilder, + key_expr: ZResult>, + kind: T, + timestamp: Option, + qos: QoSBuilder, + + #[cfg(feature = "unstable")] + source_info: SourceInfo, + + #[cfg(feature = "unstable")] + attachment: Option, } -impl TimestampBuilderTrait for ReplySampleBuilder<'_, T> { +impl TimestampBuilderTrait for ReplyBuilder<'_, '_, T> { fn timestamp>>(self, timestamp: U) -> Self { Self { - sample_builder: self.sample_builder.timestamp(timestamp), + timestamp: timestamp.into(), ..self } } } -impl SampleBuilderTrait for ReplySampleBuilder<'_, T> { +impl SampleBuilderTrait for ReplyBuilder<'_, '_, T> { #[cfg(feature = "unstable")] fn source_info(self, source_info: SourceInfo) -> Self { Self { - sample_builder: self.sample_builder.source_info(source_info), + source_info, ..self } } @@ -272,69 +297,100 @@ impl SampleBuilderTrait for ReplySampleBuilder<'_, T> { #[cfg(feature = "unstable")] fn attachment>>(self, attachment: U) -> Self { Self { - sample_builder: self.sample_builder.attachment(attachment), + attachment: attachment.into(), ..self } } } -impl QoSBuilderTrait for ReplySampleBuilder<'_, T> { +impl QoSBuilderTrait for ReplyBuilder<'_, '_, T> { fn congestion_control(self, congestion_control: CongestionControl) -> Self { - Self { - sample_builder: self.sample_builder.congestion_control(congestion_control), - ..self - } + let qos = self.qos.congestion_control(congestion_control); + Self { qos, ..self } } fn priority(self, priority: Priority) -> Self { - Self { - sample_builder: self.sample_builder.priority(priority), - ..self - } + let qos = self.qos.priority(priority); + Self { qos, ..self } } fn express(self, is_express: bool) -> Self { - Self { - sample_builder: self.sample_builder.express(is_express), - ..self - } + let qos = self.qos.express(is_express); + Self { qos, ..self } } } -impl ValueBuilderTrait for ReplySampleBuilder<'_, op::Put> { +impl ValueBuilderTrait for ReplyBuilder<'_, '_, ReplyBuilderPut> { fn encoding>(self, encoding: T) -> Self { Self { - sample_builder: self.sample_builder.encoding(encoding), + kind: ReplyBuilderPut { + encoding: encoding.into(), + ..self.kind + }, ..self } } fn payload>(self, payload: T) -> Self { Self { - sample_builder: self.sample_builder.payload(payload), + kind: 
ReplyBuilderPut { + payload: payload.into(), + ..self.kind + }, ..self } } fn value>(self, value: T) -> Self { let Value { payload, encoding } = value.into(); Self { - sample_builder: self.sample_builder.payload(payload).encoding(encoding), + kind: ReplyBuilderPut { payload, encoding }, ..self } } } -impl<'a, T> Resolvable for ReplySampleBuilder<'a, T> { +impl Resolvable for ReplyBuilder<'_, '_, T> { type To = ZResult<()>; } -impl SyncResolve for ReplySampleBuilder<'_, T> { +impl SyncResolve for ReplyBuilder<'_, '_, ReplyBuilderPut> { + fn res_sync(self) -> ::To { + let key_expr = self.key_expr?.into_owned(); + let sample = SampleBuilder::put(key_expr, self.kind.payload) + .encoding(self.kind.encoding) + .timestamp(self.timestamp) + .qos(self.qos.into()); + #[cfg(feature = "unstable")] + let sample = sample.source_info(self.source_info); + #[cfg(feature = "unstable")] + let sample = sample.attachment(self.attachment); + self.query._reply_sample(sample.into()) + } +} + +impl SyncResolve for ReplyBuilder<'_, '_, ReplyBuilderDelete> { fn res_sync(self) -> ::To { - self.query._reply_sample(self.sample_builder.into()) + let key_expr = self.key_expr?.into_owned(); + let sample = SampleBuilder::delete(key_expr) + .timestamp(self.timestamp) + .qos(self.qos.into()); + #[cfg(feature = "unstable")] + let sample = sample.source_info(self.source_info); + #[cfg(feature = "unstable")] + let sample = sample.attachment(self.attachment); + self.query._reply_sample(sample.into()) + } +} + +impl AsyncResolve for ReplyBuilder<'_, '_, ReplyBuilderPut> { + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) } } -impl<'a, T> AsyncResolve for ReplySampleBuilder<'a, T> { +impl AsyncResolve for ReplyBuilder<'_, '_, ReplyBuilderDelete> { type Future = Ready; fn res_async(self) -> Self::Future { diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/sample/mod.rs index 2b71105d5e..be80f8277e 100644 --- a/zenoh/src/sample/mod.rs +++ b/zenoh/src/sample/mod.rs @@ -590,6 +590,12 @@ impl From for QoSBuilder { } } +impl From for QoSBuilder { + fn from(qos: QoSType) -> Self { + QoSBuilder(QoS { inner: qos }) + } +} + impl From for QoS { fn from(builder: QoSBuilder) -> Self { builder.0 From e4c4be1d4c5dd5b02ed539a57eba324c6e5b2a07 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 1 Apr 2024 11:53:44 +0200 Subject: [PATCH 090/357] removed "op" namespace to align naming with ReplyBuilder --- zenoh/src/sample/builder.rs | 46 ++++++++++++++++++------------------- 1 file changed, 22 insertions(+), 24 deletions(-) diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index 7f438d1381..fd697e942a 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -64,14 +64,12 @@ pub trait ValueBuilderTrait { fn value>(self, value: T) -> Self; } -pub mod op { - #[derive(Debug)] - pub struct Put; - #[derive(Debug)] - pub struct Delete; - #[derive(Debug)] - pub struct Any; -} +#[derive(Debug)] +pub struct SampleBuilderPut; +#[derive(Debug)] +pub struct SampleBuilderDelete; +#[derive(Debug)] +pub struct SampleBuilderAny; #[derive(Debug)] pub struct SampleBuilder { @@ -79,11 +77,11 @@ pub struct SampleBuilder { _t: PhantomData, } -impl SampleBuilder { +impl SampleBuilder { pub fn put( key_expr: IntoKeyExpr, payload: IntoPayload, - ) -> SampleBuilder + ) -> SampleBuilder where IntoKeyExpr: Into>, IntoPayload: Into, @@ -101,13 +99,13 @@ impl SampleBuilder { #[cfg(feature = "unstable")] attachment: None, }, - _t: PhantomData::, + _t: PhantomData::, } } } -impl 
SampleBuilder { - pub fn delete(key_expr: IntoKeyExpr) -> SampleBuilder +impl SampleBuilder { + pub fn delete(key_expr: IntoKeyExpr) -> SampleBuilder where IntoKeyExpr: Into>, { @@ -124,7 +122,7 @@ impl SampleBuilder { #[cfg(feature = "unstable")] attachment: None, }, - _t: PhantomData::, + _t: PhantomData::, } } } @@ -216,14 +214,14 @@ impl QoSBuilderTrait for SampleBuilder { } } -impl ValueBuilderTrait for SampleBuilder { +impl ValueBuilderTrait for SampleBuilder { fn encoding>(self, encoding: T) -> Self { Self { sample: Sample { encoding: encoding.into(), ..self.sample }, - _t: PhantomData::, + _t: PhantomData::, } } fn payload>(self, payload: T) -> Self { @@ -232,7 +230,7 @@ impl ValueBuilderTrait for SampleBuilder { payload: payload.into(), ..self.sample }, - _t: PhantomData::, + _t: PhantomData::, } } fn value>(self, value: T) -> Self { @@ -243,21 +241,21 @@ impl ValueBuilderTrait for SampleBuilder { encoding, ..self.sample }, - _t: PhantomData::, + _t: PhantomData::, } } } -impl From for SampleBuilder { +impl From for SampleBuilder { fn from(sample: Sample) -> Self { SampleBuilder { sample, - _t: PhantomData::, + _t: PhantomData::, } } } -impl TryFrom for SampleBuilder { +impl TryFrom for SampleBuilder { type Error = zresult::Error; fn try_from(sample: Sample) -> Result { if sample.kind != SampleKind::Put { @@ -265,12 +263,12 @@ impl TryFrom for SampleBuilder { } Ok(SampleBuilder { sample, - _t: PhantomData::, + _t: PhantomData::, }) } } -impl TryFrom for SampleBuilder { +impl TryFrom for SampleBuilder { type Error = zresult::Error; fn try_from(sample: Sample) -> Result { if sample.kind != SampleKind::Delete { @@ -278,7 +276,7 @@ impl TryFrom for SampleBuilder { } Ok(SampleBuilder { sample, - _t: PhantomData::, + _t: PhantomData::, }) } } From d631f761620b377cd9460f275c4f6deeef61e996 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 1 Apr 2024 13:25:59 +0200 Subject: [PATCH 091/357] publication builder shortened --- zenoh/src/publication.rs | 145 ++++++++---------------------------- zenoh/src/sample/builder.rs | 8 +- zenoh/src/session.rs | 15 ++-- 3 files changed, 46 insertions(+), 122 deletions(-) diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 1d62375cdd..69715a0867 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -41,33 +41,15 @@ use zenoh_result::ZResult; /// The kind of congestion control. pub use zenoh_protocol::core::CongestionControl; -/// A builder for initializing a [`delete`](crate::Session::delete) operation. -/// -/// # Examples -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use zenoh::prelude::r#async::*; -/// use zenoh::publication::CongestionControl; -/// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// session -/// .delete("key/expression") -/// .res() -/// .await -/// .unwrap(); -/// # } -/// ``` -pub struct DeleteBuilder<'a, 'b> { - pub(crate) publisher: PublisherBuilder<'a, 'b>, - pub(crate) timestamp: Option, - #[cfg(feature = "unstable")] - pub(crate) source_info: SourceInfo, - #[cfg(feature = "unstable")] - pub(crate) attachment: Option, +#[derive(Debug, Clone)] +pub struct PublicationBuilderPut { + pub(crate) payload: Payload, + pub(crate) encoding: Encoding, } +#[derive(Debug, Clone)] +pub struct PublicationBuilderDelete; -/// A builder for initializing a [`put`](crate::Session::put) operation. 
+/// A builder for initializing a [`put`](crate::Session::put) and [`delete`](crate::Session::delete) operations /// /// # Examples /// ``` @@ -89,10 +71,9 @@ pub struct DeleteBuilder<'a, 'b> { /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug, Clone)] -pub struct PutBuilder<'a, 'b> { +pub struct PublicationBuilder<'a, 'b, T> { pub(crate) publisher: PublisherBuilder<'a, 'b>, - pub(crate) payload: Payload, - pub(crate) encoding: Encoding, + pub(crate) kind: T, pub(crate) timestamp: Option, #[cfg(feature = "unstable")] pub(crate) source_info: SourceInfo, @@ -100,7 +81,7 @@ pub struct PutBuilder<'a, 'b> { pub(crate) attachment: Option, } -impl QoSBuilderTrait for PutBuilder<'_, '_> { +impl QoSBuilderTrait for PublicationBuilder<'_, '_, T> { #[inline] fn congestion_control(self, congestion_control: CongestionControl) -> Self { Self { @@ -124,58 +105,8 @@ impl QoSBuilderTrait for PutBuilder<'_, '_> { } } -impl QoSBuilderTrait for DeleteBuilder<'_, '_> { - #[inline] - fn congestion_control(self, congestion_control: CongestionControl) -> Self { - Self { - publisher: self.publisher.congestion_control(congestion_control), - ..self - } - } - #[inline] - fn priority(self, priority: Priority) -> Self { - Self { - publisher: self.publisher.priority(priority), - ..self - } - } - #[inline] - fn express(self, is_express: bool) -> Self { - Self { - publisher: self.publisher.express(is_express), - ..self - } - } -} - -impl TimestampBuilderTrait for PutBuilder<'_, '_> { - fn timestamp>>(self, timestamp: T) -> Self { - Self { - timestamp: timestamp.into(), - ..self - } - } -} - -impl SampleBuilderTrait for PutBuilder<'_, '_> { - #[cfg(feature = "unstable")] - fn source_info(self, source_info: SourceInfo) -> Self { - Self { - source_info, - ..self - } - } - #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: T) -> Self { - Self { - attachment: attachment.into(), - ..self - } - } -} - -impl TimestampBuilderTrait for DeleteBuilder<'_, '_> { - fn timestamp>>(self, timestamp: T) -> Self { +impl TimestampBuilderTrait for PublicationBuilder<'_, '_, T> { + fn timestamp>>(self, timestamp: TS) -> Self { Self { timestamp: timestamp.into(), ..self @@ -183,7 +114,7 @@ impl TimestampBuilderTrait for DeleteBuilder<'_, '_> { } } -impl SampleBuilderTrait for DeleteBuilder<'_, '_> { +impl SampleBuilderTrait for PublicationBuilder<'_, '_, T> { #[cfg(feature = "unstable")] fn source_info(self, source_info: SourceInfo) -> Self { Self { @@ -192,7 +123,7 @@ impl SampleBuilderTrait for DeleteBuilder<'_, '_> { } } #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: T) -> Self { + fn attachment>>(self, attachment: TA) -> Self { Self { attachment: attachment.into(), ..self @@ -200,10 +131,13 @@ impl SampleBuilderTrait for DeleteBuilder<'_, '_> { } } -impl ValueBuilderTrait for PutBuilder<'_, '_> { +impl ValueBuilderTrait for PublicationBuilder<'_, '_, PublicationBuilderPut> { fn encoding>(self, encoding: T) -> Self { Self { - encoding: encoding.into(), + kind: PublicationBuilderPut { + encoding: encoding.into(), + ..self.kind + }, ..self } } @@ -213,32 +147,23 @@ impl ValueBuilderTrait for PutBuilder<'_, '_> { IntoPayload: Into, { Self { - payload: payload.into(), + kind: PublicationBuilderPut { + payload: payload.into(), + ..self.kind + }, ..self } } fn value>(self, value: T) -> Self { let Value { payload, encoding } = value.into(); Self { - payload, - encoding, + kind: PublicationBuilderPut { payload, 
encoding }, ..self } } } -impl PutBuilder<'_, '_> { - /// Restrict the matching subscribers that will receive the published data - /// to the ones that have the given [`Locality`](crate::prelude::Locality). - #[zenoh_macros::unstable] - #[inline] - pub fn allowed_destination(mut self, destination: Locality) -> Self { - self.publisher = self.publisher.allowed_destination(destination); - self - } -} - -impl DeleteBuilder<'_, '_> { +impl PublicationBuilder<'_, '_, T> { /// Restrict the matching subscribers that will receive the published data /// to the ones that have the given [`Locality`](crate::prelude::Locality). #[zenoh_macros::unstable] @@ -249,23 +174,19 @@ impl DeleteBuilder<'_, '_> { } } -impl Resolvable for PutBuilder<'_, '_> { - type To = ZResult<()>; -} - -impl Resolvable for DeleteBuilder<'_, '_> { +impl Resolvable for PublicationBuilder<'_, '_, T> { type To = ZResult<()>; } -impl SyncResolve for PutBuilder<'_, '_> { +impl SyncResolve for PublicationBuilder<'_, '_, PublicationBuilderPut> { #[inline] fn res_sync(self) -> ::To { let publisher = self.publisher.create_one_shot_publisher()?; resolve_put( &publisher, - self.payload, + self.kind.payload, SampleKind::Put, - self.encoding, + self.kind.encoding, self.timestamp, #[cfg(feature = "unstable")] self.source_info, @@ -275,7 +196,7 @@ impl SyncResolve for PutBuilder<'_, '_> { } } -impl SyncResolve for DeleteBuilder<'_, '_> { +impl SyncResolve for PublicationBuilder<'_, '_, PublicationBuilderDelete> { #[inline] fn res_sync(self) -> ::To { let publisher = self.publisher.create_one_shot_publisher()?; @@ -293,7 +214,7 @@ impl SyncResolve for DeleteBuilder<'_, '_> { } } -impl AsyncResolve for PutBuilder<'_, '_> { +impl AsyncResolve for PublicationBuilder<'_, '_, PublicationBuilderPut> { type Future = Ready; fn res_async(self) -> Self::Future { @@ -301,7 +222,7 @@ impl AsyncResolve for PutBuilder<'_, '_> { } } -impl AsyncResolve for DeleteBuilder<'_, '_> { +impl AsyncResolve for PublicationBuilder<'_, '_, PublicationBuilderDelete> { type Future = Ready; fn res_async(self) -> Self::Future { @@ -1038,7 +959,7 @@ impl<'a, 'b> PublisherBuilder<'a, 'b> { self } - // internal function for `PutBuilder` and `DeleteBuilder` + // internal function for perfroming the publication fn create_one_shot_publisher(self) -> ZResult> { Ok(Publisher { session: self.session, diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index fd697e942a..295451abc1 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -64,14 +64,14 @@ pub trait ValueBuilderTrait { fn value>(self, value: T) -> Self; } -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct SampleBuilderPut; -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct SampleBuilderDelete; -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct SampleBuilderAny; -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct SampleBuilder { sample: Sample, _t: PhantomData, diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 4d71f58ffa..b1b059d163 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -705,17 +705,19 @@ impl Session { &'a self, key_expr: TryIntoKeyExpr, payload: IntoPayload, - ) -> PutBuilder<'a, 'b> + ) -> PublicationBuilder<'a, 'b, PublicationBuilderPut> where TryIntoKeyExpr: TryInto>, >>::Error: Into, IntoPayload: Into, { - PutBuilder { + PublicationBuilder { publisher: self.declare_publisher(key_expr), - payload: payload.into(), + kind: PublicationBuilderPut { + payload: payload.into(), + encoding: Encoding::default(), + }, timestamp: None, - 
encoding: Encoding::default(), #[cfg(feature = "unstable")] attachment: None, #[cfg(feature = "unstable")] @@ -743,13 +745,14 @@ impl Session { pub fn delete<'a, 'b: 'a, TryIntoKeyExpr>( &'a self, key_expr: TryIntoKeyExpr, - ) -> DeleteBuilder<'a, 'b> + ) -> PublicationBuilder<'a, 'b, PublicationBuilderDelete> where TryIntoKeyExpr: TryInto>, >>::Error: Into, { - DeleteBuilder { + PublicationBuilder { publisher: self.declare_publisher(key_expr), + kind: PublicationBuilderDelete, timestamp: None, #[cfg(feature = "unstable")] attachment: None, From 9b8aaa69d190547a65084cb5c0be605aa706b67f Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 1 Apr 2024 13:45:01 +0200 Subject: [PATCH 092/357] parametrized publication builder --- zenoh/src/publication.rs | 24 ++++++++++++------------ zenoh/src/session.rs | 4 ++-- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 69715a0867..dd1818d842 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -71,8 +71,8 @@ pub struct PublicationBuilderDelete; /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug, Clone)] -pub struct PublicationBuilder<'a, 'b, T> { - pub(crate) publisher: PublisherBuilder<'a, 'b>, +pub struct PublicationBuilder { + pub(crate) publisher: P, pub(crate) kind: T, pub(crate) timestamp: Option, #[cfg(feature = "unstable")] @@ -81,7 +81,7 @@ pub struct PublicationBuilder<'a, 'b, T> { pub(crate) attachment: Option, } -impl QoSBuilderTrait for PublicationBuilder<'_, '_, T> { +impl QoSBuilderTrait for PublicationBuilder, T> { #[inline] fn congestion_control(self, congestion_control: CongestionControl) -> Self { Self { @@ -105,7 +105,7 @@ impl QoSBuilderTrait for PublicationBuilder<'_, '_, T> { } } -impl TimestampBuilderTrait for PublicationBuilder<'_, '_, T> { +impl TimestampBuilderTrait for PublicationBuilder { fn timestamp>>(self, timestamp: TS) -> Self { Self { timestamp: timestamp.into(), @@ -114,7 +114,7 @@ impl TimestampBuilderTrait for PublicationBuilder<'_, '_, T> { } } -impl SampleBuilderTrait for PublicationBuilder<'_, '_, T> { +impl SampleBuilderTrait for PublicationBuilder { #[cfg(feature = "unstable")] fn source_info(self, source_info: SourceInfo) -> Self { Self { @@ -131,7 +131,7 @@ impl SampleBuilderTrait for PublicationBuilder<'_, '_, T> { } } -impl ValueBuilderTrait for PublicationBuilder<'_, '_, PublicationBuilderPut> { +impl

ValueBuilderTrait for PublicationBuilder { fn encoding>(self, encoding: T) -> Self { Self { kind: PublicationBuilderPut { @@ -163,7 +163,7 @@ impl ValueBuilderTrait for PublicationBuilder<'_, '_, PublicationBuilderPut> { } } -impl PublicationBuilder<'_, '_, T> { +impl PublicationBuilder, T> { /// Restrict the matching subscribers that will receive the published data /// to the ones that have the given [`Locality`](crate::prelude::Locality). #[zenoh_macros::unstable] @@ -174,11 +174,11 @@ impl PublicationBuilder<'_, '_, T> { } } -impl Resolvable for PublicationBuilder<'_, '_, T> { +impl Resolvable for PublicationBuilder { type To = ZResult<()>; } -impl SyncResolve for PublicationBuilder<'_, '_, PublicationBuilderPut> { +impl SyncResolve for PublicationBuilder, PublicationBuilderPut> { #[inline] fn res_sync(self) -> ::To { let publisher = self.publisher.create_one_shot_publisher()?; @@ -196,7 +196,7 @@ impl SyncResolve for PublicationBuilder<'_, '_, PublicationBuilderPut> { } } -impl SyncResolve for PublicationBuilder<'_, '_, PublicationBuilderDelete> { +impl SyncResolve for PublicationBuilder, PublicationBuilderDelete> { #[inline] fn res_sync(self) -> ::To { let publisher = self.publisher.create_one_shot_publisher()?; @@ -214,7 +214,7 @@ impl SyncResolve for PublicationBuilder<'_, '_, PublicationBuilderDelete> { } } -impl AsyncResolve for PublicationBuilder<'_, '_, PublicationBuilderPut> { +impl AsyncResolve for PublicationBuilder, PublicationBuilderPut> { type Future = Ready; fn res_async(self) -> Self::Future { @@ -222,7 +222,7 @@ impl AsyncResolve for PublicationBuilder<'_, '_, PublicationBuilderPut> { } } -impl AsyncResolve for PublicationBuilder<'_, '_, PublicationBuilderDelete> { +impl AsyncResolve for PublicationBuilder, PublicationBuilderDelete> { type Future = Ready; fn res_async(self) -> Self::Future { diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index b1b059d163..63cc0bb7fa 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -705,7 +705,7 @@ impl Session { &'a self, key_expr: TryIntoKeyExpr, payload: IntoPayload, - ) -> PublicationBuilder<'a, 'b, PublicationBuilderPut> + ) -> PublicationBuilder, PublicationBuilderPut> where TryIntoKeyExpr: TryInto>, >>::Error: Into, @@ -745,7 +745,7 @@ impl Session { pub fn delete<'a, 'b: 'a, TryIntoKeyExpr>( &'a self, key_expr: TryIntoKeyExpr, - ) -> PublicationBuilder<'a, 'b, PublicationBuilderDelete> + ) -> PublicationBuilder, PublicationBuilderDelete> where TryIntoKeyExpr: TryInto>, >>::Error: Into, From bbe07f78294418e5f1d2aa95499987f827e3510c Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 1 Apr 2024 14:03:02 +0200 Subject: [PATCH 093/357] removed PutPublication, DeletePublication --- zenoh/src/publication.rs | 144 +++++---------------------------------- 1 file changed, 18 insertions(+), 126 deletions(-) diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index dd1818d842..41e2b0fa04 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -408,14 +408,19 @@ impl<'a> Publisher<'a> { /// # } /// ``` #[inline] - pub fn put(&self, payload: IntoPayload) -> PutPublication + pub fn put( + &self, + payload: IntoPayload, + ) -> PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> where IntoPayload: Into, { - PutPublication { + PublicationBuilder { publisher: self, - payload: payload.into(), - encoding: Encoding::ZENOH_BYTES, + kind: PublicationBuilderPut { + payload: payload.into(), + encoding: Encoding::ZENOH_BYTES, + }, timestamp: None, #[cfg(feature = "unstable")] source_info: 
SourceInfo::empty(), @@ -437,9 +442,10 @@ impl<'a> Publisher<'a> { /// publisher.delete().res().await.unwrap(); /// # } /// ``` - pub fn delete(&self) -> DeletePublication { - DeletePublication { + pub fn delete(&self) -> PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { + PublicationBuilder { publisher: self, + kind: PublicationBuilderDelete, timestamp: None, #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), @@ -674,127 +680,13 @@ impl Drop for Publisher<'_> { } } -/// A [`Resolvable`] returned by [`Publisher::put()`](Publisher::put), -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -pub struct PutPublication<'a> { - publisher: &'a Publisher<'a>, - payload: Payload, - encoding: Encoding, - timestamp: Option, - #[cfg(feature = "unstable")] - pub(crate) source_info: SourceInfo, - #[cfg(feature = "unstable")] - pub(crate) attachment: Option, -} - -/// A [`Resolvable`] returned by [`Publisher::delete()`](Publisher::delete) -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -pub struct DeletePublication<'a> { - publisher: &'a Publisher<'a>, - timestamp: Option, - #[cfg(feature = "unstable")] - pub(crate) source_info: SourceInfo, - #[cfg(feature = "unstable")] - pub(crate) attachment: Option, -} - -impl TimestampBuilderTrait for PutPublication<'_> { - fn timestamp>>(self, timestamp: T) -> Self { - Self { - timestamp: timestamp.into(), - ..self - } - } -} - -impl SampleBuilderTrait for PutPublication<'_> { - #[cfg(feature = "unstable")] - fn source_info(self, source_info: SourceInfo) -> Self { - Self { - source_info, - ..self - } - } - - #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: T) -> Self { - Self { - attachment: attachment.into(), - ..self - } - } -} - -impl ValueBuilderTrait for PutPublication<'_> { - fn encoding>(self, encoding: T) -> Self { - Self { - encoding: encoding.into(), - ..self - } - } - - fn payload(self, payload: IntoPayload) -> Self - where - IntoPayload: Into, - { - Self { - payload: payload.into(), - ..self - } - } - - fn value>(self, value: T) -> Self { - let Value { payload, encoding } = value.into(); - Self { - payload, - encoding, - ..self - } - } -} - -impl TimestampBuilderTrait for DeletePublication<'_> { - fn timestamp>>(self, timestamp: T) -> Self { - Self { - timestamp: timestamp.into(), - ..self - } - } -} - -impl SampleBuilderTrait for DeletePublication<'_> { - #[cfg(feature = "unstable")] - fn source_info(self, source_info: SourceInfo) -> Self { - Self { - source_info, - ..self - } - } - - #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: T) -> Self { - Self { - attachment: attachment.into(), - ..self - } - } -} - -impl Resolvable for PutPublication<'_> { - type To = ZResult<()>; -} - -impl Resolvable for DeletePublication<'_> { - type To = ZResult<()>; -} - -impl SyncResolve for PutPublication<'_> { +impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { fn res_sync(self) -> ::To { resolve_put( self.publisher, - self.payload, + self.kind.payload, SampleKind::Put, - self.encoding, + self.kind.encoding, self.timestamp, #[cfg(feature = "unstable")] self.source_info, @@ -804,7 +696,7 @@ impl SyncResolve for PutPublication<'_> { } } -impl SyncResolve for DeletePublication<'_> { +impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { fn res_sync(self) -> ::To { resolve_put( self.publisher, @@ 
-820,7 +712,7 @@ impl SyncResolve for DeletePublication<'_> { } } -impl AsyncResolve for PutPublication<'_> { +impl AsyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { type Future = Ready; fn res_async(self) -> Self::Future { @@ -828,7 +720,7 @@ impl AsyncResolve for PutPublication<'_> { } } -impl AsyncResolve for DeletePublication<'_> { +impl AsyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { type Future = Ready; fn res_async(self) -> Self::Future { From 4d0f6e52d07c9c0208430b454f8982044f2e0409 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 1 Apr 2024 15:32:30 +0200 Subject: [PATCH 094/357] removed extra uses --- examples/examples/z_ping.rs | 1 - examples/examples/z_pong.rs | 1 - examples/examples/z_pub.rs | 1 - examples/examples/z_pub_shm_thr.rs | 1 - examples/examples/z_pub_thr.rs | 1 - .../zenoh-plugin-rest/examples/z_serve_sse.rs | 1 - .../src/replica/align_queryable.rs | 2 - .../src/replica/aligner.rs | 2 +- .../src/replica/storage.rs | 53 +++----- zenoh-ext/src/group.rs | 1 - zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh/src/publication.rs | 65 +++++---- zenoh/src/queryable.rs | 126 +++++++++--------- zenoh/tests/session.rs | 1 - zenoh/tests/unicity.rs | 1 - 15 files changed, 115 insertions(+), 144 deletions(-) diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index 59bcaddadc..a57c937e48 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -16,7 +16,6 @@ use std::time::{Duration, Instant}; use zenoh::config::Config; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; -use zenoh::sample::builder::QoSBuilderTrait; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index e0fa079629..baa5683f62 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -15,7 +15,6 @@ use clap::Parser; use zenoh::config::Config; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; -use zenoh::sample::builder::QoSBuilderTrait; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index b6a1ddc0d8..8cd3c4edba 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -15,7 +15,6 @@ use clap::Parser; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; -use zenoh::sample::builder::SampleBuilderTrait; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index 86429e8ab7..c8a33f98fa 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -15,7 +15,6 @@ use clap::Parser; use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; -use zenoh::sample::builder::QoSBuilderTrait; use zenoh::shm::SharedMemoryManager; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 78d54111a8..4354ad2e68 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -16,7 +16,6 @@ use clap::Parser; use std::convert::TryInto; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; -use zenoh::sample::builder::QoSBuilderTrait; use zenoh_examples::CommonArgs; fn main() { diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index c353826fab..bb76005d6e 100644 --- 
a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -15,7 +15,6 @@ use clap::{arg, Command}; use std::time::Duration; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; -use zenoh::sample::builder::QoSBuilderTrait; use zenoh::{config::Config, key_expr::keyexpr}; const HTML: &str = r#" diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 729572601c..1ce6a1cb16 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -20,8 +20,6 @@ use std::str; use std::str::FromStr; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; -use zenoh::sample::builder::TimestampBuilderTrait; -use zenoh::sample::builder::ValueBuilderTrait; use zenoh::time::Timestamp; use zenoh::Session; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 5121f0b445..64d5cfa1cd 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -21,7 +21,7 @@ use std::str; use zenoh::key_expr::{KeyExpr, OwnedKeyExpr}; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; -use zenoh::sample::builder::{SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}; +use zenoh::sample::builder::SampleBuilder; use zenoh::time::Timestamp; use zenoh::Session; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index feebfb588a..06c5882408 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -23,14 +23,13 @@ use std::str::{self, FromStr}; use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::buffer::SplitBuffer; use zenoh::buffers::ZBuf; -use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh::query::{ConsolidationMode, QueryTarget}; -use zenoh::sample::builder::{SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}; +use zenoh::sample::builder::SampleBuilder; use zenoh::sample::{Sample, SampleKind}; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::value::Value; -use zenoh::{Result as ZResult, Session, SessionDeclarations}; +use zenoh::{Result as ZResult, Session}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; use zenoh_keyexpr::key_expr::OwnedKeyExpr; @@ -296,31 +295,25 @@ impl StorageService { ); // there might be the case that the actual update was outdated due to a wild card update, but not stored yet in the storage. // get the relevant wild card entry and use that value and timestamp to update the storage - let sample_to_store: Sample = match self + let sample_to_store: Sample = if let Some(update) = self .ovderriding_wild_update(&k, sample.timestamp().unwrap()) .await { - Some(Update { - kind: SampleKind::Put, - data, - }) => { - let Value { - payload, encoding, .. 
- } = data.value; - SampleBuilder::put(KeyExpr::from(k.clone()), payload) - .encoding(encoding) - .timestamp(data.timestamp) - .into() + match update.kind { + SampleKind::Put => { + SampleBuilder::put(KeyExpr::from(k.clone()), update.data.value.payload) + .encoding(update.data.value.encoding) + .timestamp(update.data.timestamp) + .into() + } + SampleKind::Delete => SampleBuilder::delete(KeyExpr::from(k.clone())) + .timestamp(update.data.timestamp) + .into(), } - Some(Update { - kind: SampleKind::Delete, - data, - }) => SampleBuilder::delete(KeyExpr::from(k.clone())) - .timestamp(data.timestamp) - .into(), - None => SampleBuilder::from(sample.clone()) + } else { + SampleBuilder::from(sample.clone()) .keyexpr(k.clone()) - .into(), + .into() }; let stripped_key = match self.strip_prefix(sample_to_store.key_expr()) { @@ -520,12 +513,9 @@ impl StorageService { match storage.get(stripped_key, q.parameters()).await { Ok(stored_data) => { for entry in stored_data { - let Value { - payload, encoding, .. - } = entry.value; if let Err(e) = q - .reply(key.clone(), payload) - .encoding(encoding) + .reply(key.clone(), entry.value.payload) + .encoding(entry.value.encoding) .timestamp(entry.timestamp) .res() .await @@ -555,12 +545,9 @@ impl StorageService { match storage.get(stripped_key, q.parameters()).await { Ok(stored_data) => { for entry in stored_data { - let Value { - payload, encoding, .. - } = entry.value; if let Err(e) = q - .reply(q.key_expr().clone(), payload) - .encoding(encoding) + .reply(q.key_expr().clone(), entry.value.payload) + .encoding(entry.value.encoding) .timestamp(entry.timestamp) .res() .await diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 2075ea9472..8a7823ed72 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -29,7 +29,6 @@ use zenoh::payload::PayloadReader; use zenoh::prelude::r#async::*; use zenoh::publication::Publisher; use zenoh::query::ConsolidationMode; -use zenoh::sample::builder::QoSBuilderTrait; use zenoh::Error as ZError; use zenoh::Result as ZResult; use zenoh::Session; diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 4e9b46854d..d749a94ed9 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -20,7 +20,7 @@ use std::time::Duration; use zenoh::handlers::{locked, DefaultHandler}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; -use zenoh::sample::builder::{SampleBuilder, TimestampBuilderTrait}; +use zenoh::sample::builder::SampleBuilder; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::{new_reception_timestamp, Timestamp}; use zenoh::Result as ZResult; diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 41e2b0fa04..64fa5b49c6 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -15,9 +15,6 @@ //! Publishing primitives. 
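// The `crate::sample::builder::*` trait imports removed just below can only go away
// because the `crate::prelude::*` glob is assumed to re-export QoSBuilderTrait,
// SampleBuilderTrait, TimestampBuilderTrait and ValueBuilderTrait. A minimal,
// commented caller-side sketch of the builder chain this keeps working (the key
// expression and payload are illustrative, not taken from the patch):
//
//   use zenoh::prelude::r#async::*;
//   use zenoh::publication::CongestionControl;
//
//   async fn publish() -> zenoh::Result<()> {
//       let session = zenoh::open(config::peer()).res().await?;
//       session
//           .put("demo/example", "payload")               // PublicationBuilder<_, PublicationBuilderPut>
//           .encoding(Encoding::ZENOH_BYTES)              // ValueBuilderTrait, via the prelude
//           .congestion_control(CongestionControl::Block) // QoSBuilderTrait, via the prelude
//           .res()
//           .await
//   }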
use crate::net::primitives::Primitives; use crate::prelude::*; -use crate::sample::builder::{ - QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, -}; #[zenoh_macros::unstable] use crate::sample::Attachment; use crate::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; @@ -105,29 +102,14 @@ impl QoSBuilderTrait for PublicationBuilder, T> { } } -impl TimestampBuilderTrait for PublicationBuilder { - fn timestamp>>(self, timestamp: TS) -> Self { - Self { - timestamp: timestamp.into(), - ..self - } - } -} - -impl SampleBuilderTrait for PublicationBuilder { - #[cfg(feature = "unstable")] - fn source_info(self, source_info: SourceInfo) -> Self { - Self { - source_info, - ..self - } - } - #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: TA) -> Self { - Self { - attachment: attachment.into(), - ..self - } +impl PublicationBuilder, T> { + /// Restrict the matching subscribers that will receive the published data + /// to the ones that have the given [`Locality`](crate::prelude::Locality). + #[zenoh_macros::unstable] + #[inline] + pub fn allowed_destination(mut self, destination: Locality) -> Self { + self.publisher = self.publisher.allowed_destination(destination); + self } } @@ -163,14 +145,29 @@ impl

ValueBuilderTrait for PublicationBuilder { } } -impl PublicationBuilder, T> { - /// Restrict the matching subscribers that will receive the published data - /// to the ones that have the given [`Locality`](crate::prelude::Locality). - #[zenoh_macros::unstable] - #[inline] - pub fn allowed_destination(mut self, destination: Locality) -> Self { - self.publisher = self.publisher.allowed_destination(destination); - self +impl SampleBuilderTrait for PublicationBuilder { + #[cfg(feature = "unstable")] + fn source_info(self, source_info: SourceInfo) -> Self { + Self { + source_info, + ..self + } + } + #[cfg(feature = "unstable")] + fn attachment>>(self, attachment: TA) -> Self { + Self { + attachment: attachment.into(), + ..self + } + } +} + +impl TimestampBuilderTrait for PublicationBuilder { + fn timestamp>>(self, timestamp: TS) -> Self { + Self { + timestamp: timestamp.into(), + ..self + } } } diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 37c3a2303a..0696fcbe33 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -18,9 +18,7 @@ use crate::encoding::Encoding; use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; -use crate::sample::builder::{ - QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, -}; +use crate::sample::builder::SampleBuilder; use crate::sample::{QoSBuilder, SourceInfo}; use crate::Id; use crate::SessionRef; @@ -287,17 +285,17 @@ impl TimestampBuilderTrait for ReplyBuilder<'_, '_, T> { impl SampleBuilderTrait for ReplyBuilder<'_, '_, T> { #[cfg(feature = "unstable")] - fn source_info(self, source_info: SourceInfo) -> Self { + fn attachment>>(self, attachment: U) -> Self { Self { - source_info, + attachment: attachment.into(), ..self } } #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: U) -> Self { + fn source_info(self, source_info: SourceInfo) -> Self { Self { - attachment: attachment.into(), + source_info, ..self } } @@ -382,6 +380,63 @@ impl SyncResolve for ReplyBuilder<'_, '_, ReplyBuilderDelete> { } } +impl Query { + fn _reply_sample(&self, sample: Sample) -> ZResult<()> { + if !self._accepts_any_replies().unwrap_or(false) + && !self.key_expr().intersects(&sample.key_expr) + { + bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", sample.key_expr, self.key_expr()) + } + #[cfg(not(feature = "unstable"))] + let ext_sinfo = None; + #[cfg(feature = "unstable")] + let ext_sinfo = sample.source_info.into(); + self.inner.primitives.send_response(Response { + rid: self.inner.qid, + wire_expr: WireExpr { + scope: 0, + suffix: std::borrow::Cow::Owned(sample.key_expr.into()), + mapping: Mapping::Sender, + }, + payload: ResponseBody::Reply(zenoh::Reply { + consolidation: zenoh::Consolidation::DEFAULT, + ext_unknown: vec![], + payload: match sample.kind { + SampleKind::Put => ReplyBody::Put(Put { + timestamp: sample.timestamp, + encoding: sample.encoding.into(), + ext_sinfo, + #[cfg(feature = "shared-memory")] + ext_shm: None, + #[cfg(feature = "unstable")] + ext_attachment: sample.attachment.map(|a| a.into()), + #[cfg(not(feature = "unstable"))] + ext_attachment: None, + ext_unknown: vec![], + payload: sample.payload.into(), + }), + SampleKind::Delete => ReplyBody::Del(Del { + timestamp: sample.timestamp, + ext_sinfo, + #[cfg(feature = "unstable")] + ext_attachment: sample.attachment.map(|a| a.into()), + #[cfg(not(feature = "unstable"))] + 
ext_attachment: None, + ext_unknown: vec![], + }), + }, + }), + ext_qos: sample.qos.into(), + ext_tstamp: None, + ext_respid: Some(response::ext::ResponderIdType { + zid: self.inner.zid, + eid: self.eid, + }), + }); + Ok(()) + } +} + impl AsyncResolve for ReplyBuilder<'_, '_, ReplyBuilderPut> { type Future = Ready; @@ -467,63 +522,6 @@ impl<'a> AsyncResolve for ReplyErrBuilder<'a> { } } -impl Query { - fn _reply_sample(&self, sample: Sample) -> ZResult<()> { - if !self._accepts_any_replies().unwrap_or(false) - && !self.key_expr().intersects(&sample.key_expr) - { - bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", sample.key_expr, self.key_expr()) - } - #[cfg(not(feature = "unstable"))] - let ext_sinfo = None; - #[cfg(feature = "unstable")] - let ext_sinfo = sample.source_info.into(); - self.inner.primitives.send_response(Response { - rid: self.inner.qid, - wire_expr: WireExpr { - scope: 0, - suffix: std::borrow::Cow::Owned(sample.key_expr.into()), - mapping: Mapping::Sender, - }, - payload: ResponseBody::Reply(zenoh::Reply { - consolidation: zenoh::Consolidation::DEFAULT, - ext_unknown: vec![], - payload: match sample.kind { - SampleKind::Put => ReplyBody::Put(Put { - timestamp: sample.timestamp, - encoding: sample.encoding.into(), - ext_sinfo, - #[cfg(feature = "shared-memory")] - ext_shm: None, - #[cfg(feature = "unstable")] - ext_attachment: sample.attachment.map(|a| a.into()), - #[cfg(not(feature = "unstable"))] - ext_attachment: None, - ext_unknown: vec![], - payload: sample.payload.into(), - }), - SampleKind::Delete => ReplyBody::Del(Del { - timestamp: sample.timestamp, - ext_sinfo, - #[cfg(feature = "unstable")] - ext_attachment: sample.attachment.map(|a| a.into()), - #[cfg(not(feature = "unstable"))] - ext_attachment: None, - ext_unknown: vec![], - }), - }, - }), - ext_qos: sample.qos.into(), - ext_tstamp: None, - ext_respid: Some(response::ext::ResponderIdType { - zid: self.inner.zid, - eid: self.eid, - }), - }); - Ok(()) - } -} - pub(crate) struct QueryableState { pub(crate) id: Id, pub(crate) key_expr: WireExpr<'static>, diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 0518316be9..8c2d2e9937 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -15,7 +15,6 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; use zenoh::prelude::r#async::*; -use zenoh::sample::builder::QoSBuilderTrait; use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index a71a0a8034..f34704fb7e 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -16,7 +16,6 @@ use std::sync::Arc; use std::time::Duration; use tokio::runtime::Handle; use zenoh::prelude::r#async::*; -use zenoh::sample::builder::QoSBuilderTrait; use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); From 23931f92d5f9c321d8a0247a1379cc76b0275def Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 1 Apr 2024 15:48:02 +0200 Subject: [PATCH 095/357] more cleanup --- plugins/zenoh-plugin-rest/src/lib.rs | 1 - zenoh/src/query.rs | 1 - zenoh/src/sample/builder.rs | 2 ++ zenoh/src/sample/mod.rs | 8 +++++++- zenoh/src/session.rs | 1 + zenoh/tests/attachments.rs | 2 +- 6 files changed, 11 insertions(+), 4 deletions(-) diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index f78c541eff..43c3f33776 100644 --- 
a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -34,7 +34,6 @@ use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; -use zenoh::sample::builder::ValueBuilderTrait; use zenoh::selector::TIME_RANGE_KEY; use zenoh::Session; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 9f96db4f4b..cb1116130d 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -15,7 +15,6 @@ //! Query primitives. use crate::handlers::{locked, Callback, DefaultHandler}; use crate::prelude::*; -use crate::sample::builder::{QoSBuilderTrait, SampleBuilderTrait, ValueBuilderTrait}; #[zenoh_macros::unstable] use crate::sample::Attachment; use crate::sample::QoSBuilder; diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index 295451abc1..5fab36617d 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -14,9 +14,11 @@ use std::marker::PhantomData; +#[cfg(feature = "unstable")] use crate::sample::Attachment; use crate::sample::QoS; use crate::sample::QoSBuilder; +#[cfg(feature = "unstable")] use crate::sample::SourceInfo; use crate::Encoding; use crate::KeyExpr; diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/sample/mod.rs index be80f8277e..6e457578a3 100644 --- a/zenoh/src/sample/mod.rs +++ b/zenoh/src/sample/mod.rs @@ -117,7 +117,12 @@ impl DataInfoIntoSample for Option { IntoPayload: Into, { if let Some(data_info) = self { - data_info.into_sample(key_expr, payload, attachment) + data_info.into_sample( + key_expr, + payload, + #[cfg(feature = "unstable")] + attachment, + ) } else { Sample { key_expr: key_expr.into(), @@ -172,6 +177,7 @@ impl SourceInfo { } } +#[zenoh_macros::unstable] impl From for Option { fn from(source_info: SourceInfo) -> Option { if source_info.is_empty() { diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 63cc0bb7fa..c44cb4f817 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -809,6 +809,7 @@ impl Session { #[cfg(feature = "unstable")] attachment: None, handler: DefaultHandler, + #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), } } diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 2725351ab0..9fb99b7cc0 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -14,7 +14,7 @@ #[cfg(feature = "unstable")] #[test] fn pubsub() { - use zenoh::{prelude::sync::*, sample::builder::SampleBuilderTrait}; + use zenoh::prelude::sync::*; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh From 90923ca30bcc56b5eaf2e194643fe45c5395168e Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 11:43:03 +0200 Subject: [PATCH 096/357] keyexpr.rs in api/ --- zenoh/src/{ => api}/key_expr.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename zenoh/src/{ => api}/key_expr.rs (100%) diff --git a/zenoh/src/key_expr.rs b/zenoh/src/api/key_expr.rs similarity index 100% rename from zenoh/src/key_expr.rs rename to zenoh/src/api/key_expr.rs From dd2ee5c4add81fb68d892622e189b3914a9d3188 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 11:43:12 +0200 Subject: [PATCH 097/357] keyexpr.rs in api/ --- plugins/zenoh-backend-traits/src/config.rs | 2 +- plugins/zenoh-plugin-rest/examples/z_serve_sse.rs | 1 - .../src/replica/aligner.rs | 1 - zenoh/src/api.rs | 15 +++++++++++++++ zenoh/src/api/key_expr.rs | 4 ++-- 
zenoh/src/lib.rs | 10 +++++++++- zenoh/src/net/runtime/adminspace.rs | 2 +- zenoh/src/prelude.rs | 2 +- zenoh/src/publication.rs | 12 ++++++------ zenoh/src/selector.rs | 2 +- zenoh/src/session.rs | 2 +- zenoh/src/subscriber.rs | 2 +- 12 files changed, 38 insertions(+), 17 deletions(-) create mode 100644 zenoh/src/api.rs diff --git a/plugins/zenoh-backend-traits/src/config.rs b/plugins/zenoh-backend-traits/src/config.rs index d3ddbd43cc..5ab59ebe45 100644 --- a/plugins/zenoh-backend-traits/src/config.rs +++ b/plugins/zenoh-backend-traits/src/config.rs @@ -17,7 +17,7 @@ use schemars::JsonSchema; use serde_json::{Map, Value}; use std::convert::TryFrom; use std::time::Duration; -use zenoh::{key_expr::keyexpr, prelude::OwnedKeyExpr, Result as ZResult}; +use zenoh::{prelude::keyexpr, prelude::OwnedKeyExpr, Result as ZResult}; use zenoh_plugin_trait::{PluginStartArgs, StructVersion}; use zenoh_result::{bail, zerror, Error}; diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index bb76005d6e..85d730bb41 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -15,7 +15,6 @@ use clap::{arg, Command}; use std::time::Duration; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; -use zenoh::{config::Config, key_expr::keyexpr}; const HTML: &str = r#"

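With key_expr.rs relocated under api/, callers are expected to keep reaching the key-expression types either through the prelude (as the config.rs change above now does) or through the `zenoh::key_expr` re-export module that this patch adds to lib.rs further below and that the follow-up "keyexpr build fix" patch extends. A minimal caller-side sketch, assuming those re-exports; the key expression string itself is illustrative:

    use zenoh::prelude::{keyexpr, OwnedKeyExpr};

    fn demo_key_exprs() -> zenoh::Result<()> {
        // Illustrative key expression, borrowed from the z_formats example.
        let ke: &keyexpr = keyexpr::new("user_id/42/file/hi/there")?;
        let owned: OwnedKeyExpr = ke.to_owned();
        assert!(owned.intersects(ke));
        Ok(())
    }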
diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 64d5cfa1cd..ca93651e46 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -18,7 +18,6 @@ use async_std::sync::{Arc, RwLock}; use flume::{Receiver, Sender}; use std::collections::{HashMap, HashSet}; use std::str; -use zenoh::key_expr::{KeyExpr, OwnedKeyExpr}; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::sample::builder::SampleBuilder; diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs new file mode 100644 index 0000000000..94893aca68 --- /dev/null +++ b/zenoh/src/api.rs @@ -0,0 +1,15 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub(crate) mod key_expr; \ No newline at end of file diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index d2bfb5bcfe..4cbe6409f2 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -323,8 +323,8 @@ impl FromStr for KeyExpr<'static> { Ok(Self(KeyExprInner::Owned(s.parse()?))) } } -impl<'a> From> for OwnedKeyExpr { - fn from(val: super::KeyExpr<'a>) -> Self { +impl<'a> From> for OwnedKeyExpr { + fn from(val: KeyExpr<'a>) -> Self { match val.0 { KeyExprInner::Borrowed(key_expr) | KeyExprInner::BorrowedWire { key_expr, .. } => { key_expr.into() diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index ea212485ec..e62db34c59 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -123,12 +123,20 @@ pub const FEATURES: &str = concat_enabled_features!( ] ); +pub mod key_expr { + pub use crate::api::key_expr::OwnedKeyExpr; + pub mod format { + pub use crate::api::key_expr::format::KeFormat; + } +} + + mod admin; #[macro_use] mod session; pub use session::*; -pub mod key_expr; +mod api; pub(crate) mod net; pub use net::runtime; pub mod selector; diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 5ef6b7cdfe..148d9eddab 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -13,7 +13,7 @@ use super::routing::dispatcher::face::Face; use super::Runtime; use crate::encoding::Encoding; -use crate::key_expr::KeyExpr; +use crate::api::key_expr::KeyExpr; use crate::net::primitives::Primitives; use crate::payload::Payload; use crate::plugins::sealed::{self as plugins}; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 850148f506..d059a82afc 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -24,7 +24,7 @@ pub use common::*; pub(crate) mod common { - pub use crate::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; + pub use crate::api::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; pub use zenoh_buffers::{ buffer::{Buffer, SplitBuffer}, reader::HasReader, diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 64fa5b49c6..1f6ad17333 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -878,9 +878,9 @@ impl<'a, 'b> SyncResolve for PublisherBuilder<'a, 'b> { .try_into() .expect("How did you get a key expression with a length over 2^32!?"); key_expr = match 
key_expr.0 { - crate::key_expr::KeyExprInner::Borrowed(key_expr) - | crate::key_expr::KeyExprInner::BorrowedWire { key_expr, .. } => { - KeyExpr(crate::key_expr::KeyExprInner::BorrowedWire { + crate::api::key_expr::KeyExprInner::Borrowed(key_expr) + | crate::api::key_expr::KeyExprInner::BorrowedWire { key_expr, .. } => { + KeyExpr(crate::api::key_expr::KeyExprInner::BorrowedWire { key_expr, expr_id, mapping: Mapping::Sender, @@ -888,9 +888,9 @@ impl<'a, 'b> SyncResolve for PublisherBuilder<'a, 'b> { session_id, }) } - crate::key_expr::KeyExprInner::Owned(key_expr) - | crate::key_expr::KeyExprInner::Wire { key_expr, .. } => { - KeyExpr(crate::key_expr::KeyExprInner::Wire { + crate::api::key_expr::KeyExprInner::Owned(key_expr) + | crate::api::key_expr::KeyExprInner::Wire { key_expr, .. } => { + KeyExpr(crate::api::key_expr::KeyExprInner::Wire { key_expr, expr_id, mapping: Mapping::Sender, diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 2a9a38c02c..df562e196b 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -18,7 +18,7 @@ use zenoh_protocol::core::key_expr::{keyexpr, OwnedKeyExpr}; use zenoh_result::ZResult; pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; -use crate::{prelude::KeyExpr, queryable::Query}; +use crate::{api::key_expr::KeyExpr, queryable::Query}; use std::{ borrow::{Borrow, Cow}, diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index c44cb4f817..9af5ee1d5c 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -17,7 +17,7 @@ use crate::config::Notifier; use crate::encoding::Encoding; use crate::handlers::{Callback, DefaultHandler}; use crate::info::*; -use crate::key_expr::KeyExprInner; +use crate::api::key_expr::KeyExprInner; #[zenoh_macros::unstable] use crate::liveliness::{Liveliness, LivelinessTokenState}; use crate::net::primitives::Primitives; diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index 60a31a6577..1fc6e82b46 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -14,7 +14,7 @@ //! Subscribing primitives. 
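// Only the internal import path changes in this hunk (`crate::key_expr` becomes
// `crate::api::key_expr`); the public subscriber API is untouched. A minimal,
// commented caller-side sketch against that public API, assuming an already-open
// `session` and the default flume handler (key expression is illustrative):
//
//   let subscriber = session.declare_subscriber("demo/example/**").res().await?;
//   while let Ok(sample) = subscriber.recv_async().await {
//       println!("{} ({})", sample.key_expr(), sample.kind());
//   }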
use crate::handlers::{locked, Callback, DefaultHandler, IntoHandler}; -use crate::key_expr::KeyExpr; +use crate::api::key_expr::KeyExpr; use crate::prelude::Locality; use crate::sample::Sample; use crate::Id; From 948f8e32dfa3583914d5576b43cd2d257ada88dc Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 12:10:20 +0200 Subject: [PATCH 098/357] keyexpr build fix --- examples/examples/z_formats.rs | 8 +++++--- zenoh/src/lib.rs | 9 ++++++++- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/examples/examples/z_formats.rs b/examples/examples/z_formats.rs index 357448143e..69313f0e56 100644 --- a/examples/examples/z_formats.rs +++ b/examples/examples/z_formats.rs @@ -12,9 +12,11 @@ // ZettaScale Zenoh Team, // -use zenoh::prelude::keyexpr; +use zenoh::key_expr::keyexpr; +use zenoh::key_expr::kedefine; +use zenoh::key_expr::keformat; -zenoh::kedefine!( +kedefine!( pub file_format: "user_id/${user_id:*}/file/${file:*/**}", pub(crate) settings_format: "user_id/${user_id:*}/settings/${setting:**}" ); @@ -23,7 +25,7 @@ fn main() { // Formatting let mut formatter = file_format::formatter(); let file = "hi/there"; - let ke = zenoh::keformat!(formatter, user_id = 42, file).unwrap(); + let ke = keformat!(formatter, user_id = 42, file).unwrap(); println!("{formatter:?} => {ke}"); // Parsing let settings_ke = keyexpr::new("user_id/30/settings/dark_mode").unwrap(); diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index e62db34c59..c0bf501cc9 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -124,9 +124,16 @@ pub const FEATURES: &str = concat_enabled_features!( ); pub mod key_expr { + pub use crate::api::key_expr::keyexpr; pub use crate::api::key_expr::OwnedKeyExpr; + pub use crate::api::key_expr::kedefine; + pub use crate::api::key_expr::keformat; + // keyexpr format macro support pub mod format { - pub use crate::api::key_expr::format::KeFormat; + pub use crate::api::key_expr::format::*; + pub mod macro_support { + pub use crate::api::key_expr::format::macro_support::*; + } } } From 5bd3e99fb0a5955c6b83679fe3ee4de2a40e168c Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 14:24:57 +0200 Subject: [PATCH 099/357] session move unfinished --- zenoh/src/{ => api}/session.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename zenoh/src/{ => api}/session.rs (100%) diff --git a/zenoh/src/session.rs b/zenoh/src/api/session.rs similarity index 100% rename from zenoh/src/session.rs rename to zenoh/src/api/session.rs From 2378d5c3f9eb16a6bd6a815bc1478f705f848484 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 14:25:06 +0200 Subject: [PATCH 100/357] session move unfinished --- plugins/zenoh-plugin-example/src/lib.rs | 2 +- plugins/zenoh-plugin-rest/src/lib.rs | 4 +- .../src/backends_mgt.rs | 2 +- .../zenoh-plugin-storage-manager/src/lib.rs | 4 +- .../src/replica/align_queryable.rs | 2 +- .../src/replica/aligner.rs | 2 +- .../src/replica/mod.rs | 2 +- .../src/replica/storage.rs | 378 ++++++++++-------- .../src/storages_mgt.rs | 2 +- zenoh-ext/src/group.rs | 2 +- zenoh-ext/src/publication_cache.rs | 2 +- zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh-ext/src/session_ext.rs | 2 +- zenoh/src/admin.rs | 4 +- zenoh/src/api.rs | 3 +- zenoh/src/api/key_expr.rs | 2 +- zenoh/src/api/session.rs | 156 ++++++++ zenoh/src/handlers.rs | 3 +- zenoh/src/info.rs | 2 +- zenoh/src/lib.rs | 171 +------- zenoh/src/liveliness.rs | 2 +- zenoh/src/publication.rs | 8 +- zenoh/src/queryable.rs | 4 +- zenoh/src/subscriber.rs | 6 +- zenoh/tests/qos.rs | 2 +- 25 files 
changed, 416 insertions(+), 353 deletions(-) diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index ad254278e3..5615ce68af 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -144,7 +144,7 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { env_logger::init(); // create a zenoh Session that shares the same Runtime than zenohd - let session = zenoh::init(runtime).res().await.unwrap(); + let session = zenoh::session::init(runtime).res().await.unwrap(); // the HasMap used as a storage by this example of storage plugin let mut stored: HashMap = HashMap::new(); diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 43c3f33776..49c58f5074 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -35,7 +35,7 @@ use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; use zenoh::selector::TIME_RANGE_KEY; -use zenoh::Session; +use zenoh::session::Session; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; use zenoh_result::{bail, zerror, ZResult}; @@ -490,7 +490,7 @@ pub async fn run(runtime: Runtime, conf: Config) -> ZResult<()> { let _ = env_logger::try_init(); let zid = runtime.zid().to_string(); - let session = zenoh::init(runtime).res().await.unwrap(); + let session = zenoh::session::init(runtime).res().await.unwrap(); let mut app = Server::with_state((Arc::new(session), zid)); app.with( diff --git a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs index 90a6ae6250..dcce49f5da 100644 --- a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs @@ -14,7 +14,7 @@ use super::storages_mgt::*; use flume::Sender; use std::sync::Arc; -use zenoh::Session; +use zenoh::session::Session; use zenoh_backend_traits::config::StorageConfig; use zenoh_backend_traits::{Capability, VolumeInstance}; use zenoh_result::ZResult; diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 91df2f108d..78a9814179 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -30,7 +30,7 @@ use storages_mgt::StorageMessage; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::sync::*; use zenoh::runtime::Runtime; -use zenoh::Session; +use zenoh::session::Session; use zenoh_backend_traits::config::ConfigDiff; use zenoh_backend_traits::config::PluginConfig; use zenoh_backend_traits::config::StorageConfig; @@ -114,7 +114,7 @@ impl StorageRuntimeInner { let plugins_manager = PluginsManager::dynamic(lib_loader.clone(), BACKEND_LIB_PREFIX) .declare_static_plugin::(); - let session = Arc::new(zenoh::init(runtime.clone()).res_sync()?); + let session = Arc::new(zenoh::session::init(runtime.clone()).res_sync()?); // After this moment result should be only Ok. Failure of loading of one voulme or storage should not affect others. 
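The plugin and zenoh-ext diffs that follow switch from the crate-root `zenoh::Session` / `zenoh::init` to the `zenoh::session::{Session, init}` paths introduced by this move. A minimal sketch of the resulting pattern for a plugin that shares the daemon's runtime, assuming those paths; the function name is illustrative and the `Runtime` is assumed to come from the plugin's start arguments:

    use zenoh::prelude::r#async::*;
    use zenoh::runtime::Runtime;
    use zenoh::session::Session;

    async fn open_shared_session(runtime: Runtime) -> zenoh::Result<Session> {
        // Reuse the zenohd Runtime rather than opening a second, independent session.
        zenoh::session::init(runtime).res().await
    }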
diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 1ce6a1cb16..8654927f9f 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -21,7 +21,7 @@ use std::str::FromStr; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::time::Timestamp; -use zenoh::Session; +use zenoh::session::Session; pub struct AlignQueryable { session: Arc, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index ca93651e46..9c54bcf461 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -22,7 +22,7 @@ use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::sample::builder::SampleBuilder; use zenoh::time::Timestamp; -use zenoh::Session; +use zenoh::session::Session; pub struct Aligner { session: Arc, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index 5dda032029..9a4fd35a11 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -29,7 +29,7 @@ use urlencoding::encode; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::time::Timestamp; -use zenoh::Session; +use zenoh::session::Session; use zenoh_backend_traits::config::{ReplicaConfig, StorageConfig}; pub mod align_queryable; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 06c5882408..69ecf9477c 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -19,19 +19,15 @@ use async_trait::async_trait; use flume::{Receiver, Sender}; use futures::select; use std::collections::{HashMap, HashSet}; -use std::str::{self, FromStr}; +use std::str::FromStr; use std::time::{SystemTime, UNIX_EPOCH}; -use zenoh::buffers::buffer::SplitBuffer; use zenoh::buffers::ZBuf; use zenoh::prelude::r#async::*; -use zenoh::query::{ConsolidationMode, QueryTarget}; -use zenoh::sample::builder::SampleBuilder; -use zenoh::sample::{Sample, SampleKind}; +use zenoh::query::ConsolidationMode; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; -use zenoh::value::Value; -use zenoh::{Result as ZResult, Session}; +use zenoh::{Result as ZResult, session::Session}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; -use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; +use zenoh_backend_traits::{Capability, History, Persistence, Storage, StorageInsertionResult, StoredData}; use zenoh_keyexpr::key_expr::OwnedKeyExpr; use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; use zenoh_keyexpr::keyexpr_tree::{support::NonWild, support::UnknownWildness, KeBoxTree}; @@ -42,15 +38,148 @@ use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer}; pub const WILDCARD_UPDATES_FILENAME: &str = "wildcard_updates"; pub const TOMBSTONE_FILENAME: &str = "tombstones"; +#[derive(Clone, Debug)] +pub enum StorageSampleKind { + Put(Value), + Delete, +} + +#[derive(Clone, Debug)] +pub struct StorageSample { + pub key_expr: KeyExpr<'static>, + pub timestamp: Timestamp, + pub 
kind: StorageSampleKind, +} + +impl From for StorageSample { + fn from(sample: Sample) -> Self { + let timestamp = *sample.timestamp().unwrap_or(&new_reception_timestamp()); + // TODO: add API for disassembly of Sample + let key_expr = sample.key_expr().clone(); + let payload = sample.payload().clone(); + let encoding = sample.encoding().clone(); + let kind = match sample.kind() { + SampleKind::Put => StorageSampleKind::Put(Value::new(payload).with_encoding(encoding)), + SampleKind::Delete => StorageSampleKind::Delete, + }; + StorageSample { + key_expr, + timestamp, + kind, + } + } +} + #[derive(Clone)] -struct Update { - kind: SampleKind, - data: StoredData, +enum Update { + Put(StoredData), + Delete(Timestamp), +} + +impl From for Update { + fn from(value: StorageSample) -> Self { + match value.kind { + StorageSampleKind::Put(data) => Update::Put(StoredData { + value: data, + timestamp: value.timestamp, + }), + StorageSampleKind::Delete => Update::Delete(value.timestamp), + } + } +} + +impl Update { + fn timestamp(&self) -> &Timestamp { + match self { + Update::Put(data) => &data.timestamp, + Update::Delete(ts) => ts, + } + } +} + +// implement from String for Update +impl TryFrom for Update { + type Error = zenoh::Error; + + fn try_from(value: String) -> Result { + let result: (String, String, String, Vec<&[u8]>) = serde_json::from_str(&value)?; + let mut payload = ZBuf::default(); + for slice in result.3 { + payload.push_zslice(slice.to_vec().into()); + } + let value = Value::new(payload).with_encoding(result.2); + let timestamp = Timestamp::from_str(&result.1).map_err(|_| "Error parsing timestamp")?; + if result.0.eq(&(SampleKind::Put).to_string()) { + Ok(Update::Put(StoredData { value, timestamp })) + } else { + Ok(Update::Delete(timestamp)) + } + } +} + +// implement to_string for Update +impl ToString for Update { + fn to_string(&self) -> String { + let result = match self { + Update::Put(data) => ( + SampleKind::Put.to_string(), + data.timestamp.to_string(), + data.value.encoding.to_string(), + data.value.payload.slices().collect::>(), + ), + Update::Delete(ts) => ( + SampleKind::Delete.to_string(), + ts.to_string(), + "".to_string(), + vec![], + ), + }; + serde_json::to_string_pretty(&result).unwrap() + } +} + +trait IntoStorageSample { + fn into_sample(self, key_expr: IntoKeyExpr) -> StorageSample + where + IntoKeyExpr: Into>; +} + +impl IntoStorageSample for StoredData { + fn into_sample(self, key_expr: IntoKeyExpr) -> StorageSample + where + IntoKeyExpr: Into>, + { + StorageSample { + key_expr: key_expr.into(), + timestamp: self.timestamp, + kind: StorageSampleKind::Put(self.value), + } + } +} + +impl IntoStorageSample for Update { + fn into_sample(self, key_expr: IntoKeyExpr) -> StorageSample + where + IntoKeyExpr: Into>, + { + match self { + Update::Put(data) => StorageSample { + key_expr: key_expr.into(), + timestamp: data.timestamp, + kind: StorageSampleKind::Put(data.value), + }, + Update::Delete(ts) => StorageSample { + key_expr: key_expr.into(), + timestamp: ts, + kind: StorageSampleKind::Delete, + }, + } + } } pub struct ReplicationService { pub empty_start: bool, - pub aligner_updates: Receiver, + pub aligner_updates: Receiver, pub log_propagation: Sender<(OwnedKeyExpr, Timestamp)>, } @@ -109,10 +238,11 @@ impl StorageService { let saved_wc = std::fs::read_to_string(zenoh_home().join(WILDCARD_UPDATES_FILENAME)).unwrap(); let saved_wc: HashMap = - serde_json::from_str(&saved_wc).unwrap(); + serde_json::from_str(&saved_wc).unwrap(); // TODO: Remove unwrap let mut 
wildcard_updates = storage_service.wildcard_updates.write().await; for (k, data) in saved_wc { - wildcard_updates.insert(&k, construct_update(data)); + wildcard_updates.insert(&k, Update::try_from(data).unwrap()); + // TODO: Remove unwrap } } } @@ -183,7 +313,7 @@ impl StorageService { log::error!("Sample {:?} is not timestamped. Please timestamp samples meant for replicated storage.", sample); } else { - self.process_sample(sample).await; + self.process_sample(sample.into()).await; } }, // on query on key_expr @@ -223,16 +353,15 @@ impl StorageService { select!( // on sample for key_expr sample = storage_sub.recv_async() => { - let sample = match sample { + let mut sample = match sample { Ok(sample) => sample, Err(e) => { log::error!("Error in sample: {}", e); continue; } }; - let timestamp = sample.timestamp().cloned().unwrap_or(new_reception_timestamp()); - let sample = SampleBuilder::from(sample).timestamp(timestamp).into(); - self.process_sample(sample).await; + sample.ensure_timestamp(); + self.process_sample(sample.into()).await; }, // on query on key_expr query = storage_queryable.recv_async() => { @@ -262,61 +391,48 @@ impl StorageService { // The storage should only simply save the key, sample pair while put and retrieve the same during get // the trimming during PUT and GET should be handled by the plugin - async fn process_sample(&self, sample: Sample) { + async fn process_sample(&self, sample: StorageSample) { log::trace!("[STORAGE] Processing sample: {:?}", sample); + // if wildcard, update wildcard_updates - if sample.key_expr().is_wild() { + if sample.key_expr.is_wild() { self.register_wildcard_update(sample.clone()).await; } - let matching_keys = if sample.key_expr().is_wild() { - self.get_matching_keys(sample.key_expr()).await + let matching_keys = if sample.key_expr.is_wild() { + self.get_matching_keys(&sample.key_expr).await } else { - vec![sample.key_expr().clone().into()] + vec![sample.key_expr.clone().into()] }; log::trace!( "The list of keys matching `{}` is : {:?}", - sample.key_expr(), + sample.key_expr, matching_keys ); for k in matching_keys { if !self - .is_deleted(&k.clone(), sample.timestamp().unwrap()) + .is_deleted(&k.clone(), &sample.timestamp) .await && (self.capability.history.eq(&History::All) || (self.capability.history.eq(&History::Latest) - && self.is_latest(&k, sample.timestamp().unwrap()).await)) + && self.is_latest(&k, &sample.timestamp).await)) { log::trace!( "Sample `{:?}` identified as neded processing for key {}", sample, - k + &k ); // there might be the case that the actual update was outdated due to a wild card update, but not stored yet in the storage. 
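// In other words: a wildcard put/delete registered earlier with a timestamp newer than
// this sample must win. `ovderriding_wild_update` returns that newer Update, if any, and
// the sample actually written below is rebuilt from it; otherwise the incoming sample is
// stored as-is.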
// get the relevant wild card entry and use that value and timestamp to update the storage - let sample_to_store: Sample = if let Some(update) = self - .ovderriding_wild_update(&k, sample.timestamp().unwrap()) - .await - { - match update.kind { - SampleKind::Put => { - SampleBuilder::put(KeyExpr::from(k.clone()), update.data.value.payload) - .encoding(update.data.value.encoding) - .timestamp(update.data.timestamp) - .into() - } - SampleKind::Delete => SampleBuilder::delete(KeyExpr::from(k.clone())) - .timestamp(update.data.timestamp) - .into(), - } - } else { - SampleBuilder::from(sample.clone()) - .keyexpr(k.clone()) - .into() - }; + let sample_to_store = + match self.ovderriding_wild_update(&k, &sample.timestamp).await { + Some(overriding_update) => overriding_update.into_sample(k.clone()), - let stripped_key = match self.strip_prefix(sample_to_store.key_expr()) { + None => sample.clone().into(), + }; + + let stripped_key = match self.strip_prefix(&sample_to_store.key_expr) { Ok(stripped) => stripped, Err(e) => { log::error!("{}", e); @@ -324,25 +440,21 @@ impl StorageService { } }; let mut storage = self.storage.lock().await; - let result = match sample.kind() { - SampleKind::Put => { + let result = match sample_to_store.kind { + StorageSampleKind::Put(data) => { storage .put( stripped_key, - Value::new(sample_to_store.payload().clone()) - .encoding(sample_to_store.encoding().clone()), - *sample_to_store.timestamp().unwrap(), + data, + sample_to_store.timestamp, ) .await - } - SampleKind::Delete => { + }, + StorageSampleKind::Delete => { // register a tombstone - self.mark_tombstone(&k, *sample_to_store.timestamp().unwrap()) - .await; - storage - .delete(stripped_key, *sample_to_store.timestamp().unwrap()) - .await - } + self.mark_tombstone(&k, sample_to_store.timestamp).await; + storage.delete(stripped_key, sample_to_store.timestamp).await + }, }; drop(storage); if self.replication.is_some() @@ -354,7 +466,7 @@ impl StorageService { .as_ref() .unwrap() .log_propagation - .send((k.clone(), *sample_to_store.timestamp().unwrap())); + .send((k.clone(), sample_to_store.timestamp)); match sending { Ok(_) => (), Err(e) => { @@ -385,26 +497,16 @@ impl StorageService { } } - async fn register_wildcard_update(&self, sample: Sample) { + async fn register_wildcard_update(&self, sample: StorageSample) { // @TODO: change into a better store that does incremental writes - let key = sample.key_expr().clone(); + let key = sample.key_expr.clone(); let mut wildcards = self.wildcard_updates.write().await; - let timestamp = *sample.timestamp().unwrap(); - wildcards.insert( - &key, - Update { - kind: sample.kind(), - data: StoredData { - value: Value::from(sample), - timestamp, - }, - }, - ); + wildcards.insert(&key, sample.into()); if self.capability.persistence.eq(&Persistence::Durable) { // flush to disk to makeit durable let mut serialized_data = HashMap::new(); for (k, update) in wildcards.key_value_pairs() { - serialized_data.insert(k, serialize_update(update)); + serialized_data.insert(k, update.to_string()); } if let Err(e) = std::fs::write( zenoh_home().join(WILDCARD_UPDATES_FILENAME), @@ -433,34 +535,36 @@ impl StorageService { let mut update = None; for node in wildcards.intersecting_keys(key_expr) { let weight = wildcards.weight_at(&node); - if weight.is_some() && weight.unwrap().data.timestamp > *ts { - // if the key matches a wild card update, check whether it was saved in storage - // remember that wild card updates change only existing keys - let stripped_key = match 
self.strip_prefix(&key_expr.into()) { - Ok(stripped) => stripped, - Err(e) => { - log::error!("{}", e); - break; - } - }; - let mut storage = self.storage.lock().await; - match storage.get(stripped_key, "").await { - Ok(stored_data) => { - for entry in stored_data { - if entry.timestamp > *ts { - return None; + if let Some(weight) = weight { + if weight.timestamp() > ts { + // if the key matches a wild card update, check whether it was saved in storage + // remember that wild card updates change only existing keys + let stripped_key = match self.strip_prefix(&key_expr.into()) { + Ok(stripped) => stripped, + Err(e) => { + log::error!("{}", e); + break; + } + }; + let mut storage = self.storage.lock().await; + match storage.get(stripped_key, "").await { + Ok(stored_data) => { + for entry in stored_data { + if entry.timestamp > *ts { + return None; + } } } - } - Err(e) => { - log::warn!( - "Storage '{}' raised an error fetching a query on key {} : {}", - self.name, - key_expr, - e - ); - ts = &weight.unwrap().data.timestamp; - update = Some(weight.unwrap().clone()); + Err(e) => { + log::warn!( + "Storage '{}' raised an error fetching a query on key {} : {}", + self.name, + key_expr, + e + ); + ts = weight.timestamp(); + update = Some(weight.clone()); + } } } } @@ -513,13 +617,8 @@ impl StorageService { match storage.get(stripped_key, q.parameters()).await { Ok(stored_data) => { for entry in stored_data { - if let Err(e) = q - .reply(key.clone(), entry.value.payload) - .encoding(entry.value.encoding) - .timestamp(entry.timestamp) - .res() - .await - { + let sample = entry.into_sample(key.clone()); + if let Err(e) = q.reply_sample(sample).res().await { log::warn!( "Storage '{}' raised an error replying a query: {}", self.name, @@ -545,13 +644,13 @@ impl StorageService { match storage.get(stripped_key, q.parameters()).await { Ok(stored_data) => { for entry in stored_data { - if let Err(e) = q - .reply(q.key_expr().clone(), entry.value.payload) - .encoding(entry.value.encoding) - .timestamp(entry.timestamp) - .res() - .await - { + let Value { + payload, encoding, .. + } = entry.value; + let sample = Sample::put(q.key_expr().clone(), payload) + .with_encoding(encoding) + .with_timestamp(entry.timestamp); + if let Err(e) = q.reply_sample(sample).res().await { log::warn!( "Storage '{}' raised an error replying a query: {}", self.name, @@ -658,7 +757,7 @@ impl StorageService { while let Ok(reply) = replies.recv_async().await { match reply.sample { Ok(sample) => { - self.process_sample(sample).await; + self.process_sample(sample.into()).await; } Err(e) => log::warn!( "Storage '{}' received an error to align query: {:?}", @@ -671,47 +770,6 @@ impl StorageService { } } -fn serialize_update(update: &Update) -> String { - let Update { - kind, - data: - StoredData { - value: Value { - payload, encoding, .. 
- }, - timestamp, - }, - } = update; - let zbuf: ZBuf = payload.into(); - - let result = ( - kind.to_string(), - timestamp.to_string(), - encoding.to_string(), - zbuf.slices().collect::>(), - ); - serde_json::to_string_pretty(&result).unwrap() -} - -fn construct_update(data: String) -> Update { - let result: (String, String, String, Vec<&[u8]>) = serde_json::from_str(&data).unwrap(); // @TODO: remove the unwrap() - let mut payload = ZBuf::default(); - for slice in result.3 { - payload.push_zslice(slice.to_vec().into()); - } - let value = Value::new(payload).encoding(result.2); - let data = StoredData { - value, - timestamp: Timestamp::from_str(&result.1).unwrap(), // @TODO: remove the unwrap() - }; - let kind = if result.0.eq(&(SampleKind::Put).to_string()) { - SampleKind::Put - } else { - SampleKind::Delete - }; - Update { kind, data } -} - // Periodic event cleaning-up data info for old metadata struct GarbageCollectionEvent { config: GarbageCollectionConfig, @@ -743,7 +801,7 @@ impl Timed for GarbageCollectionEvent { let mut to_be_removed = HashSet::new(); for (k, update) in wildcard_updates.key_value_pairs() { - let ts = update.data.timestamp; + let ts = update.timestamp(); if ts.get_time() < &time_limit { // mark key to be removed to_be_removed.insert(k); diff --git a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs index 6de5e2f2ca..8643429a65 100644 --- a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use async_std::sync::Arc; -use zenoh::Session; +use zenoh::session::Session; use zenoh_backend_traits::config::StorageConfig; use zenoh_result::ZResult; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 8a7823ed72..f74d9d547a 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -31,7 +31,7 @@ use zenoh::publication::Publisher; use zenoh::query::ConsolidationMode; use zenoh::Error as ZError; use zenoh::Result as ZResult; -use zenoh::Session; +use zenoh::session::Session; use zenoh_result::bail; use zenoh_sync::Condition; diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 03f0814e5c..fdba3af231 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -18,7 +18,7 @@ use std::future::Ready; use zenoh::prelude::r#async::*; use zenoh::queryable::{Query, Queryable}; use zenoh::subscriber::FlumeSubscriber; -use zenoh::SessionRef; +use zenoh::session::SessionRef; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_result::{bail, ZResult}; use zenoh_util::core::ResolveFuture; diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index d749a94ed9..4a9469c835 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -24,7 +24,7 @@ use zenoh::sample::builder::SampleBuilder; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::{new_reception_timestamp, Timestamp}; use zenoh::Result as ZResult; -use zenoh::SessionRef; +use zenoh::session::SessionRef; use zenoh_core::{zlock, AsyncResolve, Resolvable, SyncResolve}; use crate::ExtractSample; diff --git a/zenoh-ext/src/session_ext.rs b/zenoh-ext/src/session_ext.rs index 73fbd7dfc4..2c9826c98b 100644 --- a/zenoh-ext/src/session_ext.rs +++ b/zenoh-ext/src/session_ext.rs @@ -15,7 +15,7 @@ use super::PublicationCacheBuilder; use std::convert::TryInto; use 
std::sync::Arc; use zenoh::prelude::KeyExpr; -use zenoh::{Session, SessionRef}; +use zenoh::session::{Session, SessionRef}; /// Some extensions to the [`zenoh::Session`](zenoh::Session) pub trait SessionExt<'s, 'a> { diff --git a/zenoh/src/admin.rs b/zenoh/src/admin.rs index 3c76ca468a..260617cda2 100644 --- a/zenoh/src/admin.rs +++ b/zenoh/src/admin.rs @@ -17,14 +17,14 @@ use crate::{ prelude::sync::{KeyExpr, Locality, SampleKind}, queryable::Query, sample::DataInfo, - Payload, Session, ZResult, + Payload, Session }; use std::{ collections::hash_map::DefaultHasher, hash::{Hash, Hasher}, sync::Arc, }; -use zenoh_core::SyncResolve; +use zenoh_core::{Result as ZResult, SyncResolve}; use zenoh_protocol::{core::WireExpr, network::NetworkMessage}; use zenoh_transport::{ TransportEventHandler, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index 94893aca68..1e7cec5380 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -12,4 +12,5 @@ // ZettaScale Zenoh Team, // -pub(crate) mod key_expr; \ No newline at end of file +pub(crate) mod key_expr; +pub(crate) mod session; \ No newline at end of file diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index 4cbe6409f2..47d3a71c56 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -57,7 +57,7 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; -use crate::{net::primitives::Primitives, prelude::Selector, Session, Undeclarable}; +use crate::{net::primitives::Primitives, prelude::Selector, Session, api::session::Undeclarable}; #[derive(Clone, Debug)] pub(crate) enum KeyExprInner<'a> { diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 9af5ee1d5c..89cd249bdb 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -44,10 +44,12 @@ use crate::Selector; use crate::SourceInfo; use crate::Value; use log::{error, trace, warn}; +use zenoh_core::Resolvable; use std::collections::HashMap; use std::convert::TryFrom; use std::convert::TryInto; use std::fmt; +use std::future::Ready; use std::ops::Deref; use std::sync::atomic::{AtomicU16, Ordering}; use std::sync::Arc; @@ -2595,3 +2597,157 @@ impl crate::net::primitives::EPrimitives for Session { self } } + +/// Open a zenoh [`Session`]. +/// +/// # Arguments +/// +/// * `config` - The [`Config`] for the zenoh session +/// +/// # Examples +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::prelude::r#async::*; +/// +/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// # } +/// ``` +/// +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use std::str::FromStr; +/// use zenoh::prelude::r#async::*; +/// +/// let mut config = config::peer(); +/// config.set_id(ZenohId::from_str("221b72df20924c15b8794c6bdb471150").unwrap()); +/// config.connect.endpoints.extend("tcp/10.10.10.10:7447,tcp/11.11.11.11:7447".split(',').map(|s|s.parse().unwrap())); +/// +/// let session = zenoh::open(config).res().await.unwrap(); +/// # } +/// ``` +pub fn open(config: TryIntoConfig) -> OpenBuilder +where + TryIntoConfig: std::convert::TryInto + Send + 'static, + >::Error: std::fmt::Debug, +{ + OpenBuilder { config } +} + +/// A builder returned by [`open`] used to open a zenoh [`Session`]. 
+/// +/// # Examples +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::prelude::r#async::*; +/// +/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// # } +/// ``` +#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +pub struct OpenBuilder +where + TryIntoConfig: std::convert::TryInto + Send + 'static, + >::Error: std::fmt::Debug, +{ + config: TryIntoConfig, +} + +impl Resolvable for OpenBuilder +where + TryIntoConfig: std::convert::TryInto + Send + 'static, + >::Error: std::fmt::Debug, +{ + type To = ZResult; +} + +impl SyncResolve for OpenBuilder +where + TryIntoConfig: std::convert::TryInto + Send + 'static, + >::Error: std::fmt::Debug, +{ + fn res_sync(self) -> ::To { + let config: crate::config::Config = self + .config + .try_into() + .map_err(|e| zerror!("Invalid Zenoh configuration {:?}", &e))?; + Session::new(config).res_sync() + } +} + +impl AsyncResolve for OpenBuilder +where + TryIntoConfig: std::convert::TryInto + Send + 'static, + >::Error: std::fmt::Debug, +{ + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} + +/// Initialize a Session with an existing Runtime. +/// This operation is used by the plugins to share the same Runtime as the router. +#[doc(hidden)] +#[zenoh_macros::unstable] +pub fn init(runtime: Runtime) -> InitBuilder { + InitBuilder { + runtime, + aggregated_subscribers: vec![], + aggregated_publishers: vec![], + } +} + +/// A builder returned by [`init`] and used to initialize a Session with an existing Runtime. +#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +#[doc(hidden)] +#[zenoh_macros::unstable] +pub struct InitBuilder { + runtime: Runtime, + aggregated_subscribers: Vec, + aggregated_publishers: Vec, +} + +#[zenoh_macros::unstable] +impl InitBuilder { + #[inline] + pub fn aggregated_subscribers(mut self, exprs: Vec) -> Self { + self.aggregated_subscribers = exprs; + self + } + + #[inline] + pub fn aggregated_publishers(mut self, exprs: Vec) -> Self { + self.aggregated_publishers = exprs; + self + } +} + +#[zenoh_macros::unstable] +impl Resolvable for InitBuilder { + type To = ZResult; +} + +#[zenoh_macros::unstable] +impl SyncResolve for InitBuilder { + fn res_sync(self) -> ::To { + Ok(Session::init( + self.runtime, + self.aggregated_subscribers, + self.aggregated_publishers, + ) + .res_sync()) + } +} + +#[zenoh_macros::unstable] +impl AsyncResolve for InitBuilder { + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} diff --git a/zenoh/src/handlers.rs b/zenoh/src/handlers.rs index c5d2c6bb90..6aecda34b9 100644 --- a/zenoh/src/handlers.rs +++ b/zenoh/src/handlers.rs @@ -13,8 +13,7 @@ // //! Callback handler trait. -use crate::API_DATA_RECEPTION_CHANNEL_SIZE; - +use crate::api::session::API_DATA_RECEPTION_CHANNEL_SIZE; use std::sync::{Arc, Mutex, Weak}; use zenoh_collections::RingBuffer as RingBufferInner; use zenoh_result::ZResult; diff --git a/zenoh/src/info.rs b/zenoh/src/info.rs index 3e0efdf134..1f7a903ba4 100644 --- a/zenoh/src/info.rs +++ b/zenoh/src/info.rs @@ -13,7 +13,7 @@ // //! Tools to access information about the current zenoh [`Session`](crate::Session). 
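//!
//! A minimal usage sketch (assuming the resolvable accessors exposed by `Session::info()`):
//! ```ignore
//! use zenoh::prelude::r#async::*;
//!
//! let session = zenoh::open(config::peer()).res().await.unwrap();
//! let zid = session.info().zid().res().await;
//! println!("own zid: {zid}");
//! ```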
-use crate::SessionRef; +use crate::api::session::SessionRef; use std::future::Ready; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_protocol::core::{WhatAmI, ZenohId}; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index c0bf501cc9..2f1beb5413 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -84,14 +84,10 @@ pub(crate) type Id = u32; use git_version::git_version; use handlers::DefaultHandler; #[cfg(feature = "unstable")] -use net::runtime::Runtime; use prelude::*; use scouting::ScoutBuilder; -use std::future::Ready; -use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; pub use zenoh_macros::{ke, kedefine, keformat, kewrite}; use zenoh_protocol::core::WhatAmIMatcher; -use zenoh_result::{zerror, ZResult}; use zenoh_util::concat_enabled_features; /// A zenoh error. @@ -123,6 +119,8 @@ pub const FEATURES: &str = concat_enabled_features!( ] ); +pub use crate::api::session::open; + pub mod key_expr { pub use crate::api::key_expr::keyexpr; pub use crate::api::key_expr::OwnedKeyExpr; @@ -137,11 +135,16 @@ pub mod key_expr { } } +pub mod session { + pub use crate::api::session::open; + pub use crate::api::session::init; + pub use crate::api::session::Session; + pub use crate::api::session::SessionRef; + pub use crate::api::session::SessionDeclarations; +} mod admin; #[macro_use] -mod session; -pub use session::*; mod api; pub(crate) mod net; @@ -231,158 +234,4 @@ where config: config.try_into().map_err(|e| e.into()), handler: DefaultHandler, } -} - -/// Open a zenoh [`Session`]. -/// -/// # Arguments -/// -/// * `config` - The [`Config`] for the zenoh session -/// -/// # Examples -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use zenoh::prelude::r#async::*; -/// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// # } -/// ``` -/// -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use std::str::FromStr; -/// use zenoh::prelude::r#async::*; -/// -/// let mut config = config::peer(); -/// config.set_id(ZenohId::from_str("221b72df20924c15b8794c6bdb471150").unwrap()); -/// config.connect.endpoints.extend("tcp/10.10.10.10:7447,tcp/11.11.11.11:7447".split(',').map(|s|s.parse().unwrap())); -/// -/// let session = zenoh::open(config).res().await.unwrap(); -/// # } -/// ``` -pub fn open(config: TryIntoConfig) -> OpenBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: std::fmt::Debug, -{ - OpenBuilder { config } -} - -/// A builder returned by [`open`] used to open a zenoh [`Session`]. 
-/// -/// # Examples -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use zenoh::prelude::r#async::*; -/// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// # } -/// ``` -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -pub struct OpenBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: std::fmt::Debug, -{ - config: TryIntoConfig, -} - -impl Resolvable for OpenBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: std::fmt::Debug, -{ - type To = ZResult; -} - -impl SyncResolve for OpenBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: std::fmt::Debug, -{ - fn res_sync(self) -> ::To { - let config: crate::config::Config = self - .config - .try_into() - .map_err(|e| zerror!("Invalid Zenoh configuration {:?}", &e))?; - Session::new(config).res_sync() - } -} - -impl AsyncResolve for OpenBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: std::fmt::Debug, -{ - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - -/// Initialize a Session with an existing Runtime. -/// This operation is used by the plugins to share the same Runtime as the router. -#[doc(hidden)] -#[zenoh_macros::unstable] -pub fn init(runtime: Runtime) -> InitBuilder { - InitBuilder { - runtime, - aggregated_subscribers: vec![], - aggregated_publishers: vec![], - } -} - -/// A builder returned by [`init`] and used to initialize a Session with an existing Runtime. -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -#[doc(hidden)] -#[zenoh_macros::unstable] -pub struct InitBuilder { - runtime: Runtime, - aggregated_subscribers: Vec, - aggregated_publishers: Vec, -} - -#[zenoh_macros::unstable] -impl InitBuilder { - #[inline] - pub fn aggregated_subscribers(mut self, exprs: Vec) -> Self { - self.aggregated_subscribers = exprs; - self - } - - #[inline] - pub fn aggregated_publishers(mut self, exprs: Vec) -> Self { - self.aggregated_publishers = exprs; - self - } -} - -#[zenoh_macros::unstable] -impl Resolvable for InitBuilder { - type To = ZResult; -} - -#[zenoh_macros::unstable] -impl SyncResolve for InitBuilder { - fn res_sync(self) -> ::To { - Ok(Session::init( - self.runtime, - self.aggregated_subscribers, - self.aggregated_publishers, - ) - .res_sync()) - } -} - -#[zenoh_macros::unstable] -impl AsyncResolve for InitBuilder { - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} +} \ No newline at end of file diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index 23e1846741..8ce5386c3f 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -26,7 +26,7 @@ use { handlers::DefaultHandler, prelude::*, subscriber::{Subscriber, SubscriberInner}, - SessionRef, Undeclarable, + api::session::SessionRef, api::session::Undeclarable, }, std::convert::TryInto, std::future::Ready, diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 1f6ad17333..f634a14dd1 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -18,8 +18,8 @@ use crate::prelude::*; #[zenoh_macros::unstable] use crate::sample::Attachment; use crate::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; -use crate::SessionRef; -use crate::Undeclarable; +use 
crate::api::session::SessionRef; +use crate::api::session::Undeclarable; #[cfg(feature = "unstable")] use crate::{ handlers::{Callback, DefaultHandler, IntoHandler}, @@ -1511,7 +1511,7 @@ mod tests { #[test] fn sample_kind_integrity_in_publication() { - use crate::{open, prelude::sync::*}; + use crate::{api::session::open, prelude::sync::*}; const KEY_EXPR: &str = "test/sample_kind_integrity/publication"; const VALUE: &str = "zenoh"; @@ -1539,7 +1539,7 @@ mod tests { #[test] fn sample_kind_integrity_in_put_builder() { - use crate::{open, prelude::sync::*}; + use crate::{api::session::open, prelude::sync::*}; const KEY_EXPR: &str = "test/sample_kind_integrity/put_builder"; const VALUE: &str = "zenoh"; diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 0696fcbe33..447dfc81b6 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -21,8 +21,8 @@ use crate::prelude::*; use crate::sample::builder::SampleBuilder; use crate::sample::{QoSBuilder, SourceInfo}; use crate::Id; -use crate::SessionRef; -use crate::Undeclarable; +use crate::api::session::SessionRef; +use crate::api::session::Undeclarable; #[cfg(feature = "unstable")] use crate::{query::ReplyKeyExpr, sample::Attachment}; use std::fmt; diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index 1fc6e82b46..64f8d5e026 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -13,13 +13,13 @@ // //! Subscribing primitives. -use crate::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::api::key_expr::KeyExpr; +use crate::api::session::Undeclarable; +use crate::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::prelude::Locality; use crate::sample::Sample; use crate::Id; -use crate::Undeclarable; -use crate::{Result as ZResult, SessionRef}; +use crate::{api::session::SessionRef, Result as ZResult}; use std::fmt; use std::future::Ready; use std::ops::{Deref, DerefMut}; diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 5fd3edd985..f64784399c 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -14,7 +14,7 @@ use std::time::Duration; use zenoh::prelude::r#async::*; use zenoh::sample::builder::QoSBuilderTrait; -use zenoh::{publication::Priority, SessionDeclarations}; +use zenoh::{publication::Priority, session::SessionDeclarations}; use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); From 2656d9f900ab5a0c231ae684a39f8d1627c8b81b Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 20:06:01 +0200 Subject: [PATCH 101/357] restored incorrectly committed file --- .../src/replica/storage.rs | 378 ++++++++---------- 1 file changed, 160 insertions(+), 218 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 69ecf9477c..06c5882408 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -19,15 +19,19 @@ use async_trait::async_trait; use flume::{Receiver, Sender}; use futures::select; use std::collections::{HashMap, HashSet}; -use std::str::FromStr; +use std::str::{self, FromStr}; use std::time::{SystemTime, UNIX_EPOCH}; +use zenoh::buffers::buffer::SplitBuffer; use zenoh::buffers::ZBuf; use zenoh::prelude::r#async::*; -use zenoh::query::ConsolidationMode; +use zenoh::query::{ConsolidationMode, QueryTarget}; +use zenoh::sample::builder::SampleBuilder; +use zenoh::sample::{Sample, SampleKind}; use 
zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; -use zenoh::{Result as ZResult, session::Session}; +use zenoh::value::Value; +use zenoh::{Result as ZResult, Session}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; -use zenoh_backend_traits::{Capability, History, Persistence, Storage, StorageInsertionResult, StoredData}; +use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; use zenoh_keyexpr::key_expr::OwnedKeyExpr; use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; use zenoh_keyexpr::keyexpr_tree::{support::NonWild, support::UnknownWildness, KeBoxTree}; @@ -38,148 +42,15 @@ use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer}; pub const WILDCARD_UPDATES_FILENAME: &str = "wildcard_updates"; pub const TOMBSTONE_FILENAME: &str = "tombstones"; -#[derive(Clone, Debug)] -pub enum StorageSampleKind { - Put(Value), - Delete, -} - -#[derive(Clone, Debug)] -pub struct StorageSample { - pub key_expr: KeyExpr<'static>, - pub timestamp: Timestamp, - pub kind: StorageSampleKind, -} - -impl From for StorageSample { - fn from(sample: Sample) -> Self { - let timestamp = *sample.timestamp().unwrap_or(&new_reception_timestamp()); - // TODO: add API for disassembly of Sample - let key_expr = sample.key_expr().clone(); - let payload = sample.payload().clone(); - let encoding = sample.encoding().clone(); - let kind = match sample.kind() { - SampleKind::Put => StorageSampleKind::Put(Value::new(payload).with_encoding(encoding)), - SampleKind::Delete => StorageSampleKind::Delete, - }; - StorageSample { - key_expr, - timestamp, - kind, - } - } -} - #[derive(Clone)] -enum Update { - Put(StoredData), - Delete(Timestamp), -} - -impl From for Update { - fn from(value: StorageSample) -> Self { - match value.kind { - StorageSampleKind::Put(data) => Update::Put(StoredData { - value: data, - timestamp: value.timestamp, - }), - StorageSampleKind::Delete => Update::Delete(value.timestamp), - } - } -} - -impl Update { - fn timestamp(&self) -> &Timestamp { - match self { - Update::Put(data) => &data.timestamp, - Update::Delete(ts) => ts, - } - } -} - -// implement from String for Update -impl TryFrom for Update { - type Error = zenoh::Error; - - fn try_from(value: String) -> Result { - let result: (String, String, String, Vec<&[u8]>) = serde_json::from_str(&value)?; - let mut payload = ZBuf::default(); - for slice in result.3 { - payload.push_zslice(slice.to_vec().into()); - } - let value = Value::new(payload).with_encoding(result.2); - let timestamp = Timestamp::from_str(&result.1).map_err(|_| "Error parsing timestamp")?; - if result.0.eq(&(SampleKind::Put).to_string()) { - Ok(Update::Put(StoredData { value, timestamp })) - } else { - Ok(Update::Delete(timestamp)) - } - } -} - -// implement to_string for Update -impl ToString for Update { - fn to_string(&self) -> String { - let result = match self { - Update::Put(data) => ( - SampleKind::Put.to_string(), - data.timestamp.to_string(), - data.value.encoding.to_string(), - data.value.payload.slices().collect::>(), - ), - Update::Delete(ts) => ( - SampleKind::Delete.to_string(), - ts.to_string(), - "".to_string(), - vec![], - ), - }; - serde_json::to_string_pretty(&result).unwrap() - } -} - -trait IntoStorageSample { - fn into_sample(self, key_expr: IntoKeyExpr) -> StorageSample - where - IntoKeyExpr: Into>; -} - -impl IntoStorageSample for StoredData { - fn into_sample(self, key_expr: IntoKeyExpr) -> StorageSample - where - IntoKeyExpr: Into>, - { - StorageSample { - key_expr: 
key_expr.into(), - timestamp: self.timestamp, - kind: StorageSampleKind::Put(self.value), - } - } -} - -impl IntoStorageSample for Update { - fn into_sample(self, key_expr: IntoKeyExpr) -> StorageSample - where - IntoKeyExpr: Into>, - { - match self { - Update::Put(data) => StorageSample { - key_expr: key_expr.into(), - timestamp: data.timestamp, - kind: StorageSampleKind::Put(data.value), - }, - Update::Delete(ts) => StorageSample { - key_expr: key_expr.into(), - timestamp: ts, - kind: StorageSampleKind::Delete, - }, - } - } +struct Update { + kind: SampleKind, + data: StoredData, } pub struct ReplicationService { pub empty_start: bool, - pub aligner_updates: Receiver, + pub aligner_updates: Receiver, pub log_propagation: Sender<(OwnedKeyExpr, Timestamp)>, } @@ -238,11 +109,10 @@ impl StorageService { let saved_wc = std::fs::read_to_string(zenoh_home().join(WILDCARD_UPDATES_FILENAME)).unwrap(); let saved_wc: HashMap = - serde_json::from_str(&saved_wc).unwrap(); // TODO: Remove unwrap + serde_json::from_str(&saved_wc).unwrap(); let mut wildcard_updates = storage_service.wildcard_updates.write().await; for (k, data) in saved_wc { - wildcard_updates.insert(&k, Update::try_from(data).unwrap()); - // TODO: Remove unwrap + wildcard_updates.insert(&k, construct_update(data)); } } } @@ -313,7 +183,7 @@ impl StorageService { log::error!("Sample {:?} is not timestamped. Please timestamp samples meant for replicated storage.", sample); } else { - self.process_sample(sample.into()).await; + self.process_sample(sample).await; } }, // on query on key_expr @@ -353,15 +223,16 @@ impl StorageService { select!( // on sample for key_expr sample = storage_sub.recv_async() => { - let mut sample = match sample { + let sample = match sample { Ok(sample) => sample, Err(e) => { log::error!("Error in sample: {}", e); continue; } }; - sample.ensure_timestamp(); - self.process_sample(sample.into()).await; + let timestamp = sample.timestamp().cloned().unwrap_or(new_reception_timestamp()); + let sample = SampleBuilder::from(sample).timestamp(timestamp).into(); + self.process_sample(sample).await; }, // on query on key_expr query = storage_queryable.recv_async() => { @@ -391,48 +262,61 @@ impl StorageService { // The storage should only simply save the key, sample pair while put and retrieve the same during get // the trimming during PUT and GET should be handled by the plugin - async fn process_sample(&self, sample: StorageSample) { + async fn process_sample(&self, sample: Sample) { log::trace!("[STORAGE] Processing sample: {:?}", sample); - // if wildcard, update wildcard_updates - if sample.key_expr.is_wild() { + if sample.key_expr().is_wild() { self.register_wildcard_update(sample.clone()).await; } - let matching_keys = if sample.key_expr.is_wild() { - self.get_matching_keys(&sample.key_expr).await + let matching_keys = if sample.key_expr().is_wild() { + self.get_matching_keys(sample.key_expr()).await } else { - vec![sample.key_expr.clone().into()] + vec![sample.key_expr().clone().into()] }; log::trace!( "The list of keys matching `{}` is : {:?}", - sample.key_expr, + sample.key_expr(), matching_keys ); for k in matching_keys { if !self - .is_deleted(&k.clone(), &sample.timestamp) + .is_deleted(&k.clone(), sample.timestamp().unwrap()) .await && (self.capability.history.eq(&History::All) || (self.capability.history.eq(&History::Latest) - && self.is_latest(&k, &sample.timestamp).await)) + && self.is_latest(&k, sample.timestamp().unwrap()).await)) { log::trace!( "Sample `{:?}` identified as neded processing for key 
{}", sample, - &k + k ); // there might be the case that the actual update was outdated due to a wild card update, but not stored yet in the storage. // get the relevant wild card entry and use that value and timestamp to update the storage - let sample_to_store = - match self.ovderriding_wild_update(&k, &sample.timestamp).await { - Some(overriding_update) => overriding_update.into_sample(k.clone()), - - None => sample.clone().into(), - }; + let sample_to_store: Sample = if let Some(update) = self + .ovderriding_wild_update(&k, sample.timestamp().unwrap()) + .await + { + match update.kind { + SampleKind::Put => { + SampleBuilder::put(KeyExpr::from(k.clone()), update.data.value.payload) + .encoding(update.data.value.encoding) + .timestamp(update.data.timestamp) + .into() + } + SampleKind::Delete => SampleBuilder::delete(KeyExpr::from(k.clone())) + .timestamp(update.data.timestamp) + .into(), + } + } else { + SampleBuilder::from(sample.clone()) + .keyexpr(k.clone()) + .into() + }; - let stripped_key = match self.strip_prefix(&sample_to_store.key_expr) { + let stripped_key = match self.strip_prefix(sample_to_store.key_expr()) { Ok(stripped) => stripped, Err(e) => { log::error!("{}", e); @@ -440,21 +324,25 @@ impl StorageService { } }; let mut storage = self.storage.lock().await; - let result = match sample_to_store.kind { - StorageSampleKind::Put(data) => { + let result = match sample.kind() { + SampleKind::Put => { storage .put( stripped_key, - data, - sample_to_store.timestamp, + Value::new(sample_to_store.payload().clone()) + .encoding(sample_to_store.encoding().clone()), + *sample_to_store.timestamp().unwrap(), ) .await - }, - StorageSampleKind::Delete => { + } + SampleKind::Delete => { // register a tombstone - self.mark_tombstone(&k, sample_to_store.timestamp).await; - storage.delete(stripped_key, sample_to_store.timestamp).await - }, + self.mark_tombstone(&k, *sample_to_store.timestamp().unwrap()) + .await; + storage + .delete(stripped_key, *sample_to_store.timestamp().unwrap()) + .await + } }; drop(storage); if self.replication.is_some() @@ -466,7 +354,7 @@ impl StorageService { .as_ref() .unwrap() .log_propagation - .send((k.clone(), sample_to_store.timestamp)); + .send((k.clone(), *sample_to_store.timestamp().unwrap())); match sending { Ok(_) => (), Err(e) => { @@ -497,16 +385,26 @@ impl StorageService { } } - async fn register_wildcard_update(&self, sample: StorageSample) { + async fn register_wildcard_update(&self, sample: Sample) { // @TODO: change into a better store that does incremental writes - let key = sample.key_expr.clone(); + let key = sample.key_expr().clone(); let mut wildcards = self.wildcard_updates.write().await; - wildcards.insert(&key, sample.into()); + let timestamp = *sample.timestamp().unwrap(); + wildcards.insert( + &key, + Update { + kind: sample.kind(), + data: StoredData { + value: Value::from(sample), + timestamp, + }, + }, + ); if self.capability.persistence.eq(&Persistence::Durable) { // flush to disk to makeit durable let mut serialized_data = HashMap::new(); for (k, update) in wildcards.key_value_pairs() { - serialized_data.insert(k, update.to_string()); + serialized_data.insert(k, serialize_update(update)); } if let Err(e) = std::fs::write( zenoh_home().join(WILDCARD_UPDATES_FILENAME), @@ -535,36 +433,34 @@ impl StorageService { let mut update = None; for node in wildcards.intersecting_keys(key_expr) { let weight = wildcards.weight_at(&node); - if let Some(weight) = weight { - if weight.timestamp() > ts { - // if the key matches a wild card update, 
check whether it was saved in storage - // remember that wild card updates change only existing keys - let stripped_key = match self.strip_prefix(&key_expr.into()) { - Ok(stripped) => stripped, - Err(e) => { - log::error!("{}", e); - break; - } - }; - let mut storage = self.storage.lock().await; - match storage.get(stripped_key, "").await { - Ok(stored_data) => { - for entry in stored_data { - if entry.timestamp > *ts { - return None; - } + if weight.is_some() && weight.unwrap().data.timestamp > *ts { + // if the key matches a wild card update, check whether it was saved in storage + // remember that wild card updates change only existing keys + let stripped_key = match self.strip_prefix(&key_expr.into()) { + Ok(stripped) => stripped, + Err(e) => { + log::error!("{}", e); + break; + } + }; + let mut storage = self.storage.lock().await; + match storage.get(stripped_key, "").await { + Ok(stored_data) => { + for entry in stored_data { + if entry.timestamp > *ts { + return None; } } - Err(e) => { - log::warn!( - "Storage '{}' raised an error fetching a query on key {} : {}", - self.name, - key_expr, - e - ); - ts = weight.timestamp(); - update = Some(weight.clone()); - } + } + Err(e) => { + log::warn!( + "Storage '{}' raised an error fetching a query on key {} : {}", + self.name, + key_expr, + e + ); + ts = &weight.unwrap().data.timestamp; + update = Some(weight.unwrap().clone()); } } } @@ -617,8 +513,13 @@ impl StorageService { match storage.get(stripped_key, q.parameters()).await { Ok(stored_data) => { for entry in stored_data { - let sample = entry.into_sample(key.clone()); - if let Err(e) = q.reply_sample(sample).res().await { + if let Err(e) = q + .reply(key.clone(), entry.value.payload) + .encoding(entry.value.encoding) + .timestamp(entry.timestamp) + .res() + .await + { log::warn!( "Storage '{}' raised an error replying a query: {}", self.name, @@ -644,13 +545,13 @@ impl StorageService { match storage.get(stripped_key, q.parameters()).await { Ok(stored_data) => { for entry in stored_data { - let Value { - payload, encoding, .. - } = entry.value; - let sample = Sample::put(q.key_expr().clone(), payload) - .with_encoding(encoding) - .with_timestamp(entry.timestamp); - if let Err(e) = q.reply_sample(sample).res().await { + if let Err(e) = q + .reply(q.key_expr().clone(), entry.value.payload) + .encoding(entry.value.encoding) + .timestamp(entry.timestamp) + .res() + .await + { log::warn!( "Storage '{}' raised an error replying a query: {}", self.name, @@ -757,7 +658,7 @@ impl StorageService { while let Ok(reply) = replies.recv_async().await { match reply.sample { Ok(sample) => { - self.process_sample(sample.into()).await; + self.process_sample(sample).await; } Err(e) => log::warn!( "Storage '{}' received an error to align query: {:?}", @@ -770,6 +671,47 @@ impl StorageService { } } +fn serialize_update(update: &Update) -> String { + let Update { + kind, + data: + StoredData { + value: Value { + payload, encoding, .. 
+ }, + timestamp, + }, + } = update; + let zbuf: ZBuf = payload.into(); + + let result = ( + kind.to_string(), + timestamp.to_string(), + encoding.to_string(), + zbuf.slices().collect::>(), + ); + serde_json::to_string_pretty(&result).unwrap() +} + +fn construct_update(data: String) -> Update { + let result: (String, String, String, Vec<&[u8]>) = serde_json::from_str(&data).unwrap(); // @TODO: remove the unwrap() + let mut payload = ZBuf::default(); + for slice in result.3 { + payload.push_zslice(slice.to_vec().into()); + } + let value = Value::new(payload).encoding(result.2); + let data = StoredData { + value, + timestamp: Timestamp::from_str(&result.1).unwrap(), // @TODO: remove the unwrap() + }; + let kind = if result.0.eq(&(SampleKind::Put).to_string()) { + SampleKind::Put + } else { + SampleKind::Delete + }; + Update { kind, data } +} + // Periodic event cleaning-up data info for old metadata struct GarbageCollectionEvent { config: GarbageCollectionConfig, @@ -801,7 +743,7 @@ impl Timed for GarbageCollectionEvent { let mut to_be_removed = HashSet::new(); for (k, update) in wildcard_updates.key_value_pairs() { - let ts = update.timestamp(); + let ts = update.data.timestamp; if ts.get_time() < &time_limit { // mark key to be removed to_be_removed.insert(k); From acbf5517fed172d00819ad374a3f8aae7fa98ab3 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 20:17:15 +0200 Subject: [PATCH 102/357] session move build fix --- .../zenoh-plugin-storage-manager/src/replica/storage.rs | 2 +- plugins/zenoh-plugin-storage-manager/tests/operations.rs | 9 +++++---- plugins/zenoh-plugin-storage-manager/tests/wildcard.rs | 9 +++++---- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 06c5882408..39e6d34f6b 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -29,7 +29,7 @@ use zenoh::sample::builder::SampleBuilder; use zenoh::sample::{Sample, SampleKind}; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::value::Value; -use zenoh::{Result as ZResult, Session}; +use zenoh::{session::Session, Result as ZResult}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; use zenoh_keyexpr::key_expr::OwnedKeyExpr; diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index 36162f01c2..6b64bbd742 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -23,23 +23,24 @@ use async_std::task; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::query::Reply; +use zenoh::session::Session; use zenoh::{prelude::Config, time::Timestamp}; use zenoh_core::zasync_executor_init; use zenoh_plugin_trait::Plugin; -async fn put_data(session: &zenoh::Session, key_expr: &str, value: &str, _timestamp: Timestamp) { +async fn put_data(session: &Session, key_expr: &str, value: &str, _timestamp: Timestamp) { println!("Putting Data ('{key_expr}': '{value}')..."); // @TODO: how to add timestamp metadata with put, not manipulating sample... 
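    // A possible direction for the TODO above (sketch only; it assumes the put builder
    // implements the `TimestampBuilderTrait` introduced earlier in this series):
    //     session.put(key_expr, value).timestamp(_timestamp).res().await.unwrap();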
session.put(key_expr, value).res().await.unwrap(); } -async fn delete_data(session: &zenoh::Session, key_expr: &str, _timestamp: Timestamp) { +async fn delete_data(session: &Session, key_expr: &str, _timestamp: Timestamp) { println!("Deleting Data '{key_expr}'..."); // @TODO: how to add timestamp metadata with delete, not manipulating sample... session.delete(key_expr).res().await.unwrap(); } -async fn get_data(session: &zenoh::Session, key_expr: &str) -> Vec { +async fn get_data(session: &Session, key_expr: &str) -> Vec { let replies: Vec = session .get(key_expr) .res() @@ -83,7 +84,7 @@ async fn test_updates_in_order() { let storage = zenoh_plugin_storage_manager::StoragesPlugin::start("storage-manager", &runtime).unwrap(); - let session = zenoh::init(runtime).res().await.unwrap(); + let session = zenoh::session::init(runtime).res().await.unwrap(); sleep(std::time::Duration::from_secs(1)); diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index 5a71dc23f0..864ec5b79e 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -24,23 +24,24 @@ use async_std::task; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::query::Reply; +use zenoh::session::Session; use zenoh::{prelude::Config, time::Timestamp}; use zenoh_core::zasync_executor_init; use zenoh_plugin_trait::Plugin; -async fn put_data(session: &zenoh::Session, key_expr: &str, value: &str, _timestamp: Timestamp) { +async fn put_data(session: &Session, key_expr: &str, value: &str, _timestamp: Timestamp) { println!("Putting Data ('{key_expr}': '{value}')..."); // @TODO: how to add timestamp metadata with put, not manipulating sample... session.put(key_expr, value).res().await.unwrap(); } -async fn delete_data(session: &zenoh::Session, key_expr: &str, _timestamp: Timestamp) { +async fn delete_data(session: &Session, key_expr: &str, _timestamp: Timestamp) { println!("Deleting Data '{key_expr}'..."); // @TODO: how to add timestamp metadata with delete, not manipulating sample... 
session.delete(key_expr).res().await.unwrap(); } -async fn get_data(session: &zenoh::Session, key_expr: &str) -> Vec { +async fn get_data(session: &Session, key_expr: &str) -> Vec { let replies: Vec = session .get(key_expr) .res() @@ -84,7 +85,7 @@ async fn test_wild_card_in_order() { let storage = zenoh_plugin_storage_manager::StoragesPlugin::start("storage-manager", &runtime).unwrap(); - let session = zenoh::init(runtime).res().await.unwrap(); + let session = zenoh::session::init(runtime).res().await.unwrap(); sleep(std::time::Duration::from_secs(1)); // put *, ts: 1 From 996accf66348632333a4abb40df34f5100324e9a Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 21:48:05 +0200 Subject: [PATCH 103/357] sample and value into api --- zenoh/src/{sample/builder.rs => api/builders/sample.rs} | 0 zenoh/src/{sample/mod.rs => api/sample.rs} | 0 zenoh/src/{ => api}/value.rs | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename zenoh/src/{sample/builder.rs => api/builders/sample.rs} (100%) rename zenoh/src/{sample/mod.rs => api/sample.rs} (100%) rename zenoh/src/{ => api}/value.rs (100%) diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/api/builders/sample.rs similarity index 100% rename from zenoh/src/sample/builder.rs rename to zenoh/src/api/builders/sample.rs diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/api/sample.rs similarity index 100% rename from zenoh/src/sample/mod.rs rename to zenoh/src/api/sample.rs diff --git a/zenoh/src/value.rs b/zenoh/src/api/value.rs similarity index 100% rename from zenoh/src/value.rs rename to zenoh/src/api/value.rs From 554a65c2bcd8947899c37b2040915a461c77482f Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 21:48:14 +0200 Subject: [PATCH 104/357] sample and value into api --- .../src/replica/aligner.rs | 4 +-- .../src/replica/storage.rs | 2 +- zenoh-ext/src/querying_subscriber.rs | 4 +-- zenoh/src/admin.rs | 4 +-- zenoh/src/api.rs | 5 ++- zenoh/src/api/builders.rs | 15 +++++++++ zenoh/src/api/builders/sample.rs | 10 +++--- zenoh/src/api/sample.rs | 14 ++++----- zenoh/src/api/session.rs | 16 +++++----- zenoh/src/api/value.rs | 3 +- zenoh/src/lib.rs | 31 ++++++++++++++----- zenoh/src/net/runtime/adminspace.rs | 6 ++-- zenoh/src/prelude.rs | 12 +++---- zenoh/src/publication.rs | 8 ++--- zenoh/src/query.rs | 6 ++-- zenoh/src/queryable.rs | 10 +++--- zenoh/src/subscriber.rs | 2 +- zenoh/tests/attachments.rs | 2 +- zenoh/tests/qos.rs | 2 +- zenoh/tests/routing.rs | 2 +- 20 files changed, 96 insertions(+), 62 deletions(-) create mode 100644 zenoh/src/api/builders.rs diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 9c54bcf461..315a6bbb27 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -18,11 +18,11 @@ use async_std::sync::{Arc, RwLock}; use flume::{Receiver, Sender}; use std::collections::{HashMap, HashSet}; use std::str; +use zenoh::builders::SampleBuilder; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; -use zenoh::sample::builder::SampleBuilder; -use zenoh::time::Timestamp; use zenoh::session::Session; +use zenoh::time::Timestamp; pub struct Aligner { session: Arc, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 39e6d34f6b..c89fd94f04 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ 
b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -23,9 +23,9 @@ use std::str::{self, FromStr}; use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::buffer::SplitBuffer; use zenoh::buffers::ZBuf; +use zenoh::builders::SampleBuilder; use zenoh::prelude::r#async::*; use zenoh::query::{ConsolidationMode, QueryTarget}; -use zenoh::sample::builder::SampleBuilder; use zenoh::sample::{Sample, SampleKind}; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::value::Value; diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 4a9469c835..75386fd907 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -17,14 +17,14 @@ use std::future::Ready; use std::mem::swap; use std::sync::{Arc, Mutex}; use std::time::Duration; +use zenoh::builders::SampleBuilder; use zenoh::handlers::{locked, DefaultHandler}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; -use zenoh::sample::builder::SampleBuilder; +use zenoh::session::SessionRef; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::{new_reception_timestamp, Timestamp}; use zenoh::Result as ZResult; -use zenoh::session::SessionRef; use zenoh_core::{zlock, AsyncResolve, Resolvable, SyncResolve}; use crate::ExtractSample; diff --git a/zenoh/src/admin.rs b/zenoh/src/admin.rs index 260617cda2..cbb009eaf2 100644 --- a/zenoh/src/admin.rs +++ b/zenoh/src/admin.rs @@ -12,12 +12,12 @@ // ZettaScale Zenoh Team, // use crate::{ + api::sample::DataInfo, encoding::Encoding, keyexpr, prelude::sync::{KeyExpr, Locality, SampleKind}, queryable::Query, - sample::DataInfo, - Payload, Session + Payload, Session, }; use std::{ collections::hash_map::DefaultHasher, diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index 1e7cec5380..d7e6259299 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -12,5 +12,8 @@ // ZettaScale Zenoh Team, // +pub(crate) mod builders; pub(crate) mod key_expr; -pub(crate) mod session; \ No newline at end of file +pub(crate) mod sample; +pub(crate) mod session; +pub(crate) mod value; diff --git a/zenoh/src/api/builders.rs b/zenoh/src/api/builders.rs new file mode 100644 index 0000000000..09d12657a5 --- /dev/null +++ b/zenoh/src/api/builders.rs @@ -0,0 +1,15 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub(crate) mod sample; diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs index 5fab36617d..87b2b928ff 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -15,18 +15,18 @@ use std::marker::PhantomData; #[cfg(feature = "unstable")] -use crate::sample::Attachment; -use crate::sample::QoS; -use crate::sample::QoSBuilder; +use crate::api::sample::Attachment; +use crate::api::sample::QoS; +use crate::api::sample::QoSBuilder; #[cfg(feature = "unstable")] -use crate::sample::SourceInfo; +use crate::api::sample::SourceInfo; +use crate::api::value::Value; use crate::Encoding; use crate::KeyExpr; use crate::Payload; use crate::Priority; use crate::Sample; use crate::SampleKind; -use crate::Value; use uhlc::Timestamp; use zenoh_core::zresult; use zenoh_protocol::core::CongestionControl; diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 6e457578a3..649a375b1a 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -13,21 +13,22 @@ // //! Sample primitives +use crate::api::builders::sample::{QoSBuilderTrait, ValueBuilderTrait}; +use crate::api::key_expr::KeyExpr; +use crate::api::value::Value; use crate::encoding::Encoding; use crate::payload::Payload; -use crate::prelude::{KeyExpr, Value}; -use crate::sample::builder::{QoSBuilderTrait, ValueBuilderTrait}; use crate::time::Timestamp; use crate::Priority; #[zenoh_macros::unstable] +pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; +#[zenoh_macros::unstable] use serde::Serialize; use std::{convert::TryFrom, fmt}; use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::network::declare::ext::QoSType; use zenoh_protocol::{core::CongestionControl, zenoh}; -pub mod builder; - pub type SourceSn = u64; /// The locality of samples to be received by subscribers or targeted by publishers. @@ -154,7 +155,7 @@ pub struct SourceInfo { #[cfg(feature = "unstable")] fn source_info_stack_size() { use crate::{ - sample::{SourceInfo, SourceSn}, + api::sample::{SourceInfo, SourceSn}, ZenohId, }; @@ -467,9 +468,6 @@ impl TryFrom for SampleKind { } } -#[zenoh_macros::unstable] -pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; - /// Structure with public fields for sample. It's convenient if it's necessary to decompose a sample into its fields. 
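///
/// A minimal decomposition sketch (assuming the `From<Sample>` conversion provided for this struct):
/// ```ignore
/// let fields: SampleFields = sample.into();
/// println!("received on {}", fields.key_expr);
/// ```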
pub struct SampleFields { pub key_expr: KeyExpr<'static>, diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 89cd249bdb..a197253997 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -12,12 +12,18 @@ // ZettaScale Zenoh Team, // use crate::admin; +use crate::api::key_expr::KeyExprInner; +#[cfg(feature = "unstable")] +use crate::api::sample::Attachment; +use crate::api::sample::DataInfo; +use crate::api::sample::DataInfoIntoSample; +use crate::api::sample::QoS; +use crate::api::value::Value; use crate::config::Config; use crate::config::Notifier; use crate::encoding::Encoding; use crate::handlers::{Callback, DefaultHandler}; use crate::info::*; -use crate::api::key_expr::KeyExprInner; #[zenoh_macros::unstable] use crate::liveliness::{Liveliness, LivelinessTokenState}; use crate::net::primitives::Primitives; @@ -29,11 +35,6 @@ use crate::prelude::{KeyExpr, Parameters}; use crate::publication::*; use crate::query::*; use crate::queryable::*; -#[cfg(feature = "unstable")] -use crate::sample::Attachment; -use crate::sample::DataInfo; -use crate::sample::DataInfoIntoSample; -use crate::sample::QoS; use crate::selector::TIME_RANGE_KEY; use crate::subscriber::*; use crate::Id; @@ -42,9 +43,7 @@ use crate::Sample; use crate::SampleKind; use crate::Selector; use crate::SourceInfo; -use crate::Value; use log::{error, trace, warn}; -use zenoh_core::Resolvable; use std::collections::HashMap; use std::convert::TryFrom; use std::convert::TryInto; @@ -59,6 +58,7 @@ use uhlc::HLC; use zenoh_buffers::ZBuf; use zenoh_collections::SingleOrVec; use zenoh_config::unwrap_or_default; +use zenoh_core::Resolvable; use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; #[cfg(feature = "unstable")] use zenoh_protocol::network::declare::SubscriberId; diff --git a/zenoh/src/api/value.rs b/zenoh/src/api/value.rs index 92a87cb6c5..a225f2b3d8 100644 --- a/zenoh/src/api/value.rs +++ b/zenoh/src/api/value.rs @@ -13,7 +13,8 @@ // //! Value primitives. -use crate::{encoding::Encoding, payload::Payload, sample::builder::ValueBuilderTrait}; +use crate::api::builders::sample::ValueBuilderTrait; +use crate::{encoding::Encoding, payload::Payload}; /// A zenoh [`Value`] contains a `payload` and an [`Encoding`] that indicates how the [`Payload`] should be interpreted. 
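///
/// A minimal construction sketch (assuming `payload` is already a [`Payload`] and using the
/// `encoding` setter from the `ValueBuilderTrait` imported above; `Encoding::TEXT_PLAIN` is
/// one of the predefined constants):
/// ```ignore
/// let value = Value::new(payload).encoding(Encoding::TEXT_PLAIN);
/// ```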
#[non_exhaustive] diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 2f1beb5413..a789fc17a9 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -121,11 +121,19 @@ pub const FEATURES: &str = concat_enabled_features!( pub use crate::api::session::open; +pub mod builders { + pub use crate::api::builders::sample::QoSBuilderTrait; + pub use crate::api::builders::sample::SampleBuilder; + pub use crate::api::builders::sample::SampleBuilderTrait; + pub use crate::api::builders::sample::TimestampBuilderTrait; + pub use crate::api::builders::sample::ValueBuilderTrait; +} + pub mod key_expr { - pub use crate::api::key_expr::keyexpr; - pub use crate::api::key_expr::OwnedKeyExpr; pub use crate::api::key_expr::kedefine; pub use crate::api::key_expr::keformat; + pub use crate::api::key_expr::keyexpr; + pub use crate::api::key_expr::OwnedKeyExpr; // keyexpr format macro support pub mod format { pub use crate::api::key_expr::format::*; @@ -136,11 +144,22 @@ pub mod key_expr { } pub mod session { - pub use crate::api::session::open; pub use crate::api::session::init; + pub use crate::api::session::open; pub use crate::api::session::Session; - pub use crate::api::session::SessionRef; pub use crate::api::session::SessionDeclarations; + pub use crate::api::session::SessionRef; +} + +pub mod sample { + pub use crate::api::sample::Attachment; + pub use crate::api::sample::Locality; + pub use crate::api::sample::Sample; + pub use crate::api::sample::SampleKind; +} + +pub mod value { + pub use crate::api::value::Value; } mod admin; @@ -163,9 +182,7 @@ pub mod prelude; pub mod publication; pub mod query; pub mod queryable; -pub mod sample; pub mod subscriber; -pub mod value; #[cfg(feature = "shared-memory")] pub use zenoh_shm as shm; @@ -234,4 +251,4 @@ where config: config.try_into().map_err(|e| e.into()), handler: DefaultHandler, } -} \ No newline at end of file +} diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 148d9eddab..26ba22621e 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -12,16 +12,16 @@ // ZettaScale Zenoh Team, use super::routing::dispatcher::face::Face; use super::Runtime; -use crate::encoding::Encoding; +use crate::api::builders::sample::ValueBuilderTrait; use crate::api::key_expr::KeyExpr; +use crate::api::value::Value; +use crate::encoding::Encoding; use crate::net::primitives::Primitives; use crate::payload::Payload; use crate::plugins::sealed::{self as plugins}; use crate::prelude::sync::SyncResolve; use crate::queryable::Query; use crate::queryable::QueryInner; -use crate::sample::builder::ValueBuilderTrait; -use crate::value::Value; use log::{error, trace}; use serde_json::json; use std::collections::HashMap; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index d059a82afc..4aff9654cc 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -43,25 +43,25 @@ pub(crate) mod common { pub use crate::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; + pub use crate::api::value::Value; pub use crate::encoding::Encoding; /// The encoding of a zenoh `Value`. 
pub use crate::payload::{Deserialize, Payload, Serialize}; - pub use crate::value::Value; #[zenoh_macros::unstable] - pub use crate::sample::Locality; + pub use crate::api::sample::Locality; + #[zenoh_macros::unstable] + pub use crate::api::sample::SourceInfo; + pub use crate::api::sample::{Sample, SampleKind}; #[cfg(not(feature = "unstable"))] pub(crate) use crate::sample::Locality; - #[zenoh_macros::unstable] - pub use crate::sample::SourceInfo; - pub use crate::sample::{Sample, SampleKind}; pub use crate::publication::Priority; #[zenoh_macros::unstable] pub use crate::publication::PublisherDeclarations; pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; - pub use crate::sample::builder::{ + pub use crate::api::builders::sample::{ QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, }; } diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index f634a14dd1..3f528b41b7 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -13,13 +13,13 @@ // //! Publishing primitives. -use crate::net::primitives::Primitives; -use crate::prelude::*; #[zenoh_macros::unstable] -use crate::sample::Attachment; -use crate::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; +use crate::api::sample::Attachment; +use crate::api::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; use crate::api::session::SessionRef; use crate::api::session::Undeclarable; +use crate::net::primitives::Primitives; +use crate::prelude::*; #[cfg(feature = "unstable")] use crate::{ handlers::{Callback, DefaultHandler, IntoHandler}, diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index cb1116130d..b1dd5a5d73 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -13,11 +13,11 @@ // //! Query primitives. +#[zenoh_macros::unstable] +use crate::api::sample::Attachment; +use crate::api::sample::QoSBuilder; use crate::handlers::{locked, Callback, DefaultHandler}; use crate::prelude::*; -#[zenoh_macros::unstable] -use crate::sample::Attachment; -use crate::sample::QoSBuilder; use crate::Session; use std::collections::HashMap; use std::future::Ready; diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 447dfc81b6..50190ff891 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -14,17 +14,17 @@ //! Queryable primitives. +use crate::api::builders::sample::SampleBuilder; +use crate::api::sample::{QoSBuilder, SourceInfo}; +use crate::api::session::SessionRef; +use crate::api::session::Undeclarable; use crate::encoding::Encoding; use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; -use crate::sample::builder::SampleBuilder; -use crate::sample::{QoSBuilder, SourceInfo}; use crate::Id; -use crate::api::session::SessionRef; -use crate::api::session::Undeclarable; #[cfg(feature = "unstable")] -use crate::{query::ReplyKeyExpr, sample::Attachment}; +use crate::{api::sample::Attachment, query::ReplyKeyExpr}; use std::fmt; use std::future::Ready; use std::ops::Deref; diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index 64f8d5e026..239ea488a9 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -14,10 +14,10 @@ //! Subscribing primitives. 
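//!
//! A minimal declaration sketch (assuming the usual `declare_subscriber` entry point on `Session`):
//! ```ignore
//! use zenoh::prelude::r#async::*;
//!
//! let session = zenoh::open(config::peer()).res().await.unwrap();
//! let subscriber = session.declare_subscriber("key/expression").res().await.unwrap();
//! while let Ok(sample) = subscriber.recv_async().await {
//!     println!(">> received {:?}", sample);
//! }
//! ```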
use crate::api::key_expr::KeyExpr; +use crate::api::sample::Sample; use crate::api::session::Undeclarable; use crate::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::prelude::Locality; -use crate::sample::Sample; use crate::Id; use crate::{api::session::SessionRef, Result as ZResult}; use std::fmt; diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 9fb99b7cc0..52508cf27e 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -61,7 +61,7 @@ fn pubsub() { #[cfg(feature = "unstable")] #[test] fn queries() { - use zenoh::{prelude::sync::*, sample::builder::SampleBuilderTrait, sample::Attachment}; + use zenoh::{builders::SampleBuilderTrait, prelude::sync::*, sample::Attachment}; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index f64784399c..2eeee9c9df 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -12,8 +12,8 @@ // ZettaScale Zenoh Team, // use std::time::Duration; +use zenoh::builders::QoSBuilderTrait; use zenoh::prelude::r#async::*; -use zenoh::sample::builder::QoSBuilderTrait; use zenoh::{publication::Priority, session::SessionDeclarations}; use zenoh_core::ztimeout; diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 1031630a68..b3c6758ec3 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -16,9 +16,9 @@ use std::sync::atomic::Ordering; use std::sync::{atomic::AtomicUsize, Arc}; use std::time::Duration; use tokio_util::{sync::CancellationToken, task::TaskTracker}; +use zenoh::builders::QoSBuilderTrait; use zenoh::config::{Config, ModeDependentValue}; use zenoh::prelude::r#async::*; -use zenoh::sample::builder::QoSBuilderTrait; use zenoh::Result; use zenoh_core::ztimeout; use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher}; From 5735bf31cb47cb2748a53c4d54b22668cf76e4e9 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 21:53:13 +0200 Subject: [PATCH 105/357] encoding moved to api --- zenoh/src/admin.rs | 2 +- zenoh/src/api.rs | 1 + zenoh/src/{ => api}/encoding.rs | 0 zenoh/src/api/sample.rs | 2 +- zenoh/src/api/session.rs | 2 +- zenoh/src/api/value.rs | 2 +- zenoh/src/lib.rs | 5 ++++- zenoh/src/prelude.rs | 2 +- 8 files changed, 10 insertions(+), 6 deletions(-) rename zenoh/src/{ => api}/encoding.rs (100%) diff --git a/zenoh/src/admin.rs b/zenoh/src/admin.rs index cbb009eaf2..f7ddb69a37 100644 --- a/zenoh/src/admin.rs +++ b/zenoh/src/admin.rs @@ -12,8 +12,8 @@ // ZettaScale Zenoh Team, // use crate::{ + api::encoding::Encoding, api::sample::DataInfo, - encoding::Encoding, keyexpr, prelude::sync::{KeyExpr, Locality, SampleKind}, queryable::Query, diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index d7e6259299..b12a623235 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -13,6 +13,7 @@ // pub(crate) mod builders; +pub(crate) mod encoding; pub(crate) mod key_expr; pub(crate) mod sample; pub(crate) mod session; diff --git a/zenoh/src/encoding.rs b/zenoh/src/api/encoding.rs similarity index 100% rename from zenoh/src/encoding.rs rename to zenoh/src/api/encoding.rs diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 649a375b1a..4a4bc934a8 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -14,9 +14,9 @@ //! 
Sample primitives use crate::api::builders::sample::{QoSBuilderTrait, ValueBuilderTrait}; +use crate::api::encoding::Encoding; use crate::api::key_expr::KeyExpr; use crate::api::value::Value; -use crate::encoding::Encoding; use crate::payload::Payload; use crate::time::Timestamp; use crate::Priority; diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index a197253997..0a9e34f61b 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use crate::admin; +use crate::api::encoding::Encoding; use crate::api::key_expr::KeyExprInner; #[cfg(feature = "unstable")] use crate::api::sample::Attachment; @@ -21,7 +22,6 @@ use crate::api::sample::QoS; use crate::api::value::Value; use crate::config::Config; use crate::config::Notifier; -use crate::encoding::Encoding; use crate::handlers::{Callback, DefaultHandler}; use crate::info::*; #[zenoh_macros::unstable] diff --git a/zenoh/src/api/value.rs b/zenoh/src/api/value.rs index a225f2b3d8..f75abd4241 100644 --- a/zenoh/src/api/value.rs +++ b/zenoh/src/api/value.rs @@ -14,7 +14,7 @@ //! Value primitives. use crate::api::builders::sample::ValueBuilderTrait; -use crate::{encoding::Encoding, payload::Payload}; +use crate::{api::encoding::Encoding, payload::Payload}; /// A zenoh [`Value`] contains a `payload` and an [`Encoding`] that indicates how the [`Payload`] should be interpreted. #[non_exhaustive] diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index a789fc17a9..e4a90ccac8 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -162,6 +162,10 @@ pub mod value { pub use crate::api::value::Value; } +pub mod encoding { + pub use crate::api::encoding::Encoding; +} + mod admin; #[macro_use] @@ -171,7 +175,6 @@ pub use net::runtime; pub mod selector; #[deprecated = "This module is now a separate crate. Use the crate directly for shorter compile-times"] pub use zenoh_config as config; -pub(crate) mod encoding; pub mod handlers; pub mod info; #[cfg(feature = "unstable")] diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 4aff9654cc..61c21b9167 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -43,8 +43,8 @@ pub(crate) mod common { pub use crate::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; + pub use crate::api::encoding::Encoding; pub use crate::api::value::Value; - pub use crate::encoding::Encoding; /// The encoding of a zenoh `Value`. 
pub use crate::payload::{Deserialize, Payload, Serialize}; From b11ca7595d0a6f6d9725fa7a009cef7dc35dee18 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 21:56:30 +0200 Subject: [PATCH 106/357] payload moved to api --- zenoh/src/api.rs | 1 + zenoh/src/{ => api}/payload.rs | 0 zenoh/src/lib.rs | 9 ++++++++- 3 files changed, 9 insertions(+), 1 deletion(-) rename zenoh/src/{ => api}/payload.rs (100%) diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index b12a623235..45f04bf6f4 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -15,6 +15,7 @@ pub(crate) mod builders; pub(crate) mod encoding; pub(crate) mod key_expr; +pub(crate) mod payload; pub(crate) mod sample; pub(crate) mod session; pub(crate) mod value; diff --git a/zenoh/src/payload.rs b/zenoh/src/api/payload.rs similarity index 100% rename from zenoh/src/payload.rs rename to zenoh/src/api/payload.rs diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index e4a90ccac8..3e286df035 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -166,6 +166,14 @@ pub mod encoding { pub use crate::api::encoding::Encoding; } +pub mod payload { + pub use crate::api::payload::Deserialize; + pub use crate::api::payload::Payload; + pub use crate::api::payload::PayloadReader; + pub use crate::api::payload::Serialize; + pub use crate::api::payload::StringOrBase64; +} + mod admin; #[macro_use] @@ -179,7 +187,6 @@ pub mod handlers; pub mod info; #[cfg(feature = "unstable")] pub mod liveliness; -pub mod payload; pub mod plugins; pub mod prelude; pub mod publication; From 1574e1c7c79f0bb3f92a4e64ee05dd643ceca261 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 22:02:42 +0200 Subject: [PATCH 107/357] selector moved to api --- zenoh/src/api.rs | 1 + zenoh/src/{ => api}/selector.rs | 0 zenoh/src/api/session.rs | 7 ++++--- zenoh/src/lib.rs | 8 +++++++- 4 files changed, 12 insertions(+), 4 deletions(-) rename zenoh/src/{ => api}/selector.rs (100%) diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index 45f04bf6f4..068ff6d3d0 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -17,5 +17,6 @@ pub(crate) mod encoding; pub(crate) mod key_expr; pub(crate) mod payload; pub(crate) mod sample; +pub(crate) mod selector; pub(crate) mod session; pub(crate) mod value; diff --git a/zenoh/src/selector.rs b/zenoh/src/api/selector.rs similarity index 100% rename from zenoh/src/selector.rs rename to zenoh/src/api/selector.rs diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 0a9e34f61b..f50458102e 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -13,12 +13,16 @@ // use crate::admin; use crate::api::encoding::Encoding; +use crate::api::key_expr::KeyExpr; use crate::api::key_expr::KeyExprInner; #[cfg(feature = "unstable")] use crate::api::sample::Attachment; use crate::api::sample::DataInfo; use crate::api::sample::DataInfoIntoSample; use crate::api::sample::QoS; +use crate::api::selector::Parameters; +use crate::api::selector::Selector; +use crate::api::selector::TIME_RANGE_KEY; use crate::api::value::Value; use crate::config::Config; use crate::config::Notifier; @@ -31,17 +35,14 @@ use crate::net::routing::dispatcher::face::Face; use crate::net::runtime::Runtime; use crate::payload::Payload; use crate::prelude::Locality; -use crate::prelude::{KeyExpr, Parameters}; use crate::publication::*; use crate::query::*; use crate::queryable::*; -use crate::selector::TIME_RANGE_KEY; use crate::subscriber::*; use crate::Id; use crate::Priority; use crate::Sample; use crate::SampleKind; -use crate::Selector; 
use crate::SourceInfo; use log::{error, trace, warn}; use std::collections::HashMap; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 3e286df035..81aaa1545b 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -174,13 +174,19 @@ pub mod payload { pub use crate::api::payload::StringOrBase64; } +pub mod selector { + pub use crate::api::selector::Parameter; + pub use crate::api::selector::Parameters; + pub use crate::api::selector::Selector; + pub use crate::api::selector::TIME_RANGE_KEY; +} + mod admin; #[macro_use] mod api; pub(crate) mod net; pub use net::runtime; -pub mod selector; #[deprecated = "This module is now a separate crate. Use the crate directly for shorter compile-times"] pub use zenoh_config as config; pub mod handlers; From 9659ab3edfc94d0840cf024117e7b1497396bd12 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 22:07:35 +0200 Subject: [PATCH 108/357] info moved to api --- zenoh/src/api.rs | 1 + zenoh/src/{ => api}/info.rs | 0 zenoh/src/api/session.rs | 2 +- zenoh/src/lib.rs | 1 - 4 files changed, 2 insertions(+), 2 deletions(-) rename zenoh/src/{ => api}/info.rs (100%) diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index 068ff6d3d0..be7ee42051 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -14,6 +14,7 @@ pub(crate) mod builders; pub(crate) mod encoding; +pub(crate) mod info; pub(crate) mod key_expr; pub(crate) mod payload; pub(crate) mod sample; diff --git a/zenoh/src/info.rs b/zenoh/src/api/info.rs similarity index 100% rename from zenoh/src/info.rs rename to zenoh/src/api/info.rs diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index f50458102e..a4634d9d28 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -13,6 +13,7 @@ // use crate::admin; use crate::api::encoding::Encoding; +use crate::api::info::SessionInfo; use crate::api::key_expr::KeyExpr; use crate::api::key_expr::KeyExprInner; #[cfg(feature = "unstable")] @@ -27,7 +28,6 @@ use crate::api::value::Value; use crate::config::Config; use crate::config::Notifier; use crate::handlers::{Callback, DefaultHandler}; -use crate::info::*; #[zenoh_macros::unstable] use crate::liveliness::{Liveliness, LivelinessTokenState}; use crate::net::primitives::Primitives; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 81aaa1545b..60e1183670 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -190,7 +190,6 @@ pub use net::runtime; #[deprecated = "This module is now a separate crate. 
Use the crate directly for shorter compile-times"] pub use zenoh_config as config; pub mod handlers; -pub mod info; #[cfg(feature = "unstable")] pub mod liveliness; pub mod plugins; From a8de62e7d41a9fac9debd9f95d9743b507f226d8 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 22:19:19 +0200 Subject: [PATCH 109/357] subscriber moved to api --- zenoh/src/{ => api}/subscriber.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename zenoh/src/{ => api}/subscriber.rs (100%) diff --git a/zenoh/src/subscriber.rs b/zenoh/src/api/subscriber.rs similarity index 100% rename from zenoh/src/subscriber.rs rename to zenoh/src/api/subscriber.rs From 526c1b4fe2b8dacc24194d881d5a0c50f6c6d767 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 22:19:29 +0200 Subject: [PATCH 110/357] subscriber moved to api --- .../src/replica/aligner.rs | 2 +- .../src/replica/storage.rs | 2 +- zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh/src/api.rs | 1 + zenoh/src/api/session.rs | 4 +++- zenoh/src/lib.rs | 21 +++++++++++-------- zenoh/src/liveliness.rs | 5 +++-- zenoh/tests/attachments.rs | 2 +- zenoh/tests/qos.rs | 2 +- zenoh/tests/routing.rs | 2 +- 10 files changed, 25 insertions(+), 18 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 315a6bbb27..23bf066263 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -18,9 +18,9 @@ use async_std::sync::{Arc, RwLock}; use flume::{Receiver, Sender}; use std::collections::{HashMap, HashSet}; use std::str; -use zenoh::builders::SampleBuilder; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; +use zenoh::sample::SampleBuilder; use zenoh::session::Session; use zenoh::time::Timestamp; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index c89fd94f04..1abe311b65 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -23,9 +23,9 @@ use std::str::{self, FromStr}; use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::buffer::SplitBuffer; use zenoh::buffers::ZBuf; -use zenoh::builders::SampleBuilder; use zenoh::prelude::r#async::*; use zenoh::query::{ConsolidationMode, QueryTarget}; +use zenoh::sample::SampleBuilder; use zenoh::sample::{Sample, SampleKind}; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::value::Value; diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 75386fd907..24501f9eca 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -17,10 +17,10 @@ use std::future::Ready; use std::mem::swap; use std::sync::{Arc, Mutex}; use std::time::Duration; -use zenoh::builders::SampleBuilder; use zenoh::handlers::{locked, DefaultHandler}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; +use zenoh::sample::SampleBuilder; use zenoh::session::SessionRef; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::{new_reception_timestamp, Timestamp}; diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index be7ee42051..a9f08ed21c 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -20,4 +20,5 @@ pub(crate) mod payload; pub(crate) mod sample; pub(crate) mod selector; pub(crate) mod session; +pub(crate) 
mod subscriber; pub(crate) mod value; diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index a4634d9d28..96ddd63ee6 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -24,6 +24,8 @@ use crate::api::sample::QoS; use crate::api::selector::Parameters; use crate::api::selector::Selector; use crate::api::selector::TIME_RANGE_KEY; +use crate::api::subscriber::SubscriberBuilder; +use crate::api::subscriber::SubscriberState; use crate::api::value::Value; use crate::config::Config; use crate::config::Notifier; @@ -38,7 +40,6 @@ use crate::prelude::Locality; use crate::publication::*; use crate::query::*; use crate::queryable::*; -use crate::subscriber::*; use crate::Id; use crate::Priority; use crate::Sample; @@ -61,6 +62,7 @@ use zenoh_collections::SingleOrVec; use zenoh_config::unwrap_or_default; use zenoh_core::Resolvable; use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; +use zenoh_protocol::core::Reliability; #[cfg(feature = "unstable")] use zenoh_protocol::network::declare::SubscriberId; use zenoh_protocol::network::AtomicRequestId; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 60e1183670..c762009209 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -121,14 +121,6 @@ pub const FEATURES: &str = concat_enabled_features!( pub use crate::api::session::open; -pub mod builders { - pub use crate::api::builders::sample::QoSBuilderTrait; - pub use crate::api::builders::sample::SampleBuilder; - pub use crate::api::builders::sample::SampleBuilderTrait; - pub use crate::api::builders::sample::TimestampBuilderTrait; - pub use crate::api::builders::sample::ValueBuilderTrait; -} - pub mod key_expr { pub use crate::api::key_expr::kedefine; pub use crate::api::key_expr::keformat; @@ -152,6 +144,11 @@ pub mod session { } pub mod sample { + pub use crate::api::builders::sample::QoSBuilderTrait; + pub use crate::api::builders::sample::SampleBuilder; + pub use crate::api::builders::sample::SampleBuilderTrait; + pub use crate::api::builders::sample::TimestampBuilderTrait; + pub use crate::api::builders::sample::ValueBuilderTrait; pub use crate::api::sample::Attachment; pub use crate::api::sample::Locality; pub use crate::api::sample::Sample; @@ -181,6 +178,13 @@ pub mod selector { pub use crate::api::selector::TIME_RANGE_KEY; } +pub mod subscriber { + pub use crate::api::subscriber::FlumeSubscriber; + pub use crate::api::subscriber::Reliability; + pub use crate::api::subscriber::Subscriber; + pub use crate::api::subscriber::SubscriberBuilder; +} + mod admin; #[macro_use] @@ -197,7 +201,6 @@ pub mod prelude; pub mod publication; pub mod query; pub mod queryable; -pub mod subscriber; #[cfg(feature = "shared-memory")] pub use zenoh_shm as shm; diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index 8ce5386c3f..b40b786dad 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -22,11 +22,12 @@ use crate::{query::Reply, Id}; #[zenoh_macros::unstable] use { crate::{ + api::session::SessionRef, + api::session::Undeclarable, + api::subscriber::{Subscriber, SubscriberInner}, handlers::locked, handlers::DefaultHandler, prelude::*, - subscriber::{Subscriber, SubscriberInner}, - api::session::SessionRef, api::session::Undeclarable, }, std::convert::TryInto, std::future::Ready, diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 52508cf27e..7580984c8d 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -61,7 +61,7 @@ fn pubsub() { #[cfg(feature = "unstable")] 
#[test] fn queries() { - use zenoh::{builders::SampleBuilderTrait, prelude::sync::*, sample::Attachment}; + use zenoh::{prelude::sync::*, sample::Attachment, sample::SampleBuilderTrait}; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 2eeee9c9df..b9f3ab3945 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -12,8 +12,8 @@ // ZettaScale Zenoh Team, // use std::time::Duration; -use zenoh::builders::QoSBuilderTrait; use zenoh::prelude::r#async::*; +use zenoh::sample::QoSBuilderTrait; use zenoh::{publication::Priority, session::SessionDeclarations}; use zenoh_core::ztimeout; diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index b3c6758ec3..9803d62c4e 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -16,9 +16,9 @@ use std::sync::atomic::Ordering; use std::sync::{atomic::AtomicUsize, Arc}; use std::time::Duration; use tokio_util::{sync::CancellationToken, task::TaskTracker}; -use zenoh::builders::QoSBuilderTrait; use zenoh::config::{Config, ModeDependentValue}; use zenoh::prelude::r#async::*; +use zenoh::sample::QoSBuilderTrait; use zenoh::Result; use zenoh_core::ztimeout; use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher}; From cd687d8e17386e17d950a80612f0414f98f0423f Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 22:26:10 +0200 Subject: [PATCH 111/357] publisher to api --- zenoh/src/api.rs | 1 + zenoh/src/{ => api}/publication.rs | 0 zenoh/src/api/session.rs | 5 +++++ zenoh/src/lib.rs | 10 +++++++++- 4 files changed, 15 insertions(+), 1 deletion(-) rename zenoh/src/{ => api}/publication.rs (100%) diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index a9f08ed21c..259547740c 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -17,6 +17,7 @@ pub(crate) mod encoding; pub(crate) mod info; pub(crate) mod key_expr; pub(crate) mod payload; +pub(crate) mod publication; pub(crate) mod sample; pub(crate) mod selector; pub(crate) mod session; diff --git a/zenoh/src/publication.rs b/zenoh/src/api/publication.rs similarity index 100% rename from zenoh/src/publication.rs rename to zenoh/src/api/publication.rs diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 96ddd63ee6..5a780d51f0 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -16,6 +16,11 @@ use crate::api::encoding::Encoding; use crate::api::info::SessionInfo; use crate::api::key_expr::KeyExpr; use crate::api::key_expr::KeyExprInner; +use crate::api::publication::MatchingListenerState; +use crate::api::publication::MatchingStatus; +use crate::api::publication::PublicationBuilder; +use crate::api::publication::PublicationBuilderDelete; +use crate::api::publication::PublicationBuilderPut; #[cfg(feature = "unstable")] use crate::api::sample::Attachment; use crate::api::sample::DataInfo; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index c762009209..140717dd1c 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -185,6 +185,15 @@ pub mod subscriber { pub use crate::api::subscriber::SubscriberBuilder; } +pub mod publication { + pub use crate::api::publication::CongestionControl; + pub use crate::api::publication::Priority; + pub use crate::api::publication::Publisher; + pub use crate::api::publication::PublisherBuilder; + #[zenoh_macros::unstable] + pub use crate::api::publication::PublisherDeclarations; +} + mod admin; #[macro_use] @@ -198,7 +207,6 @@ pub mod handlers; pub mod liveliness; pub mod plugins; pub mod prelude; -pub mod publication; pub mod query; 
pub mod queryable; #[cfg(feature = "shared-memory")] From 22a938965cd77ec4810e2b08a6360fc98d68995a Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 22:37:45 +0200 Subject: [PATCH 112/357] query to api --- zenoh/src/api.rs | 1 + zenoh/src/{ => api}/query.rs | 0 zenoh/src/api/selector.rs | 5 +++-- zenoh/src/api/session.rs | 5 ++++- zenoh/src/lib.rs | 9 ++++++++- zenoh/src/liveliness.rs | 2 +- zenoh/src/queryable.rs | 4 ++-- 7 files changed, 19 insertions(+), 7 deletions(-) rename zenoh/src/{ => api}/query.rs (100%) diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index 259547740c..5af3b8ba45 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -18,6 +18,7 @@ pub(crate) mod info; pub(crate) mod key_expr; pub(crate) mod payload; pub(crate) mod publication; +pub(crate) mod query; pub(crate) mod sample; pub(crate) mod selector; pub(crate) mod session; diff --git a/zenoh/src/query.rs b/zenoh/src/api/query.rs similarity index 100% rename from zenoh/src/query.rs rename to zenoh/src/api/query.rs diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index df562e196b..d93a61c4fd 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -214,7 +214,7 @@ impl<'a> Selector<'a> { } #[cfg(any(feature = "unstable", test))] pub(crate) fn accept_any_keyexpr(self, any: bool) -> ZResult> { - use crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM; + use crate::api::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM; let mut s = self.into_owned(); let any_selparam = s.parameter_index(_REPLY_KEY_EXPR_ANY_SEL_PARAM)?; match (any, any_selparam) { @@ -264,7 +264,8 @@ fn selector_accessors() { map_selector.time_range().unwrap() ); let without_any = selector.to_string(); - let with_any = selector.to_string() + "&" + crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM; + let with_any = + selector.to_string() + "&" + crate::api::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM; selector = selector.accept_any_keyexpr(false).unwrap(); assert_eq!(selector.to_string(), without_any); selector = selector.accept_any_keyexpr(true).unwrap(); diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 5a780d51f0..71f4100951 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -21,6 +21,9 @@ use crate::api::publication::MatchingStatus; use crate::api::publication::PublicationBuilder; use crate::api::publication::PublicationBuilderDelete; use crate::api::publication::PublicationBuilderPut; +use crate::api::query::GetBuilder; +use crate::api::query::QueryState; +use crate::api::query::Reply; #[cfg(feature = "unstable")] use crate::api::sample::Attachment; use crate::api::sample::DataInfo; @@ -2178,7 +2181,7 @@ impl Primitives for Session { query .selector .parameters() - .get_bools([crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM]), + .get_bools([crate::api::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM]), Ok([true]) ) && !query.selector.key_expr.intersects(&key_expr) { diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 140717dd1c..8f509f1b0a 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -194,6 +194,14 @@ pub mod publication { pub use crate::api::publication::PublisherDeclarations; } +pub mod query { + pub use crate::api::query::Mode; + pub use crate::api::query::Reply; + pub use crate::api::query::ReplyKeyExpr; + pub use crate::api::query::REPLY_KEY_EXPR_ANY_SEL_PARAM; + pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; +} + mod admin; #[macro_use] @@ -207,7 +215,6 @@ pub mod handlers; pub mod liveliness; pub mod plugins; pub mod prelude; -pub mod query; pub mod 
queryable; #[cfg(feature = "shared-memory")] pub use zenoh_shm as shm; diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index b40b786dad..08145e36f1 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -17,7 +17,7 @@ //! see [`Liveliness`] use zenoh_protocol::network::request; -use crate::{query::Reply, Id}; +use crate::{api::query::Reply, Id}; #[zenoh_macros::unstable] use { diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 50190ff891..f8d8a8a8ab 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -24,7 +24,7 @@ use crate::net::primitives::Primitives; use crate::prelude::*; use crate::Id; #[cfg(feature = "unstable")] -use crate::{api::sample::Attachment, query::ReplyKeyExpr}; +use crate::{api::query::ReplyKeyExpr, api::sample::Attachment}; use std::fmt; use std::future::Ready; use std::ops::Deref; @@ -201,7 +201,7 @@ impl Query { } fn _accepts_any_replies(&self) -> ZResult { self.parameters() - .get_bools([crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM]) + .get_bools([crate::api::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM]) .map(|a| a[0]) } } From 7bc561d4b60835fe1cd3e430aecd7fe4484ed594 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 2 Apr 2024 22:46:10 +0200 Subject: [PATCH 113/357] queryable added to api --- zenoh-ext/src/publication_cache.rs | 2 +- zenoh/src/admin.rs | 2 +- zenoh/src/api.rs | 1 + zenoh/src/{ => api}/queryable.rs | 4 ++-- zenoh/src/api/selector.rs | 2 +- zenoh/src/api/session.rs | 3 +++ zenoh/src/lib.rs | 7 ++++++- zenoh/src/net/runtime/adminspace.rs | 4 ++-- 8 files changed, 17 insertions(+), 8 deletions(-) rename zenoh/src/{ => api}/queryable.rs (99%) diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index fdba3af231..b8b7c79cec 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -17,8 +17,8 @@ use std::convert::TryInto; use std::future::Ready; use zenoh::prelude::r#async::*; use zenoh::queryable::{Query, Queryable}; -use zenoh::subscriber::FlumeSubscriber; use zenoh::session::SessionRef; +use zenoh::subscriber::FlumeSubscriber; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_result::{bail, ZResult}; use zenoh_util::core::ResolveFuture; diff --git a/zenoh/src/admin.rs b/zenoh/src/admin.rs index f7ddb69a37..678f6d1bbb 100644 --- a/zenoh/src/admin.rs +++ b/zenoh/src/admin.rs @@ -13,10 +13,10 @@ // use crate::{ api::encoding::Encoding, + api::queryable::Query, api::sample::DataInfo, keyexpr, prelude::sync::{KeyExpr, Locality, SampleKind}, - queryable::Query, Payload, Session, }; use std::{ diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index 5af3b8ba45..f7049b4106 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -19,6 +19,7 @@ pub(crate) mod key_expr; pub(crate) mod payload; pub(crate) mod publication; pub(crate) mod query; +pub(crate) mod queryable; pub(crate) mod sample; pub(crate) mod selector; pub(crate) mod session; diff --git a/zenoh/src/queryable.rs b/zenoh/src/api/queryable.rs similarity index 99% rename from zenoh/src/queryable.rs rename to zenoh/src/api/queryable.rs index f8d8a8a8ab..db4df63183 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -251,8 +251,8 @@ impl AsyncResolve for ReplySample<'_> { #[derive(Debug)] pub struct ReplyBuilderPut { - payload: super::Payload, - encoding: super::Encoding, + payload: Payload, + encoding: Encoding, } #[derive(Debug)] pub struct ReplyBuilderDelete; diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index 
d93a61c4fd..51b8296634 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -18,7 +18,7 @@ use zenoh_protocol::core::key_expr::{keyexpr, OwnedKeyExpr}; use zenoh_result::ZResult; pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; -use crate::{api::key_expr::KeyExpr, queryable::Query}; +use crate::{api::key_expr::KeyExpr, api::queryable::Query}; use std::{ borrow::{Borrow, Cow}, diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 71f4100951..519b2c0011 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -24,6 +24,9 @@ use crate::api::publication::PublicationBuilderPut; use crate::api::query::GetBuilder; use crate::api::query::QueryState; use crate::api::query::Reply; +use crate::api::queryable::Query; +use crate::api::queryable::QueryInner; +use crate::api::queryable::QueryableState; #[cfg(feature = "unstable")] use crate::api::sample::Attachment; use crate::api::sample::DataInfo; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 8f509f1b0a..a2df421de8 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -202,6 +202,12 @@ pub mod query { pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; } +pub mod queryable { + pub use crate::api::queryable::Query; + pub use crate::api::queryable::Queryable; + pub use crate::api::queryable::QueryableBuilder; +} + mod admin; #[macro_use] @@ -215,7 +221,6 @@ pub mod handlers; pub mod liveliness; pub mod plugins; pub mod prelude; -pub mod queryable; #[cfg(feature = "shared-memory")] pub use zenoh_shm as shm; diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 26ba22621e..708d2bb349 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -14,14 +14,14 @@ use super::routing::dispatcher::face::Face; use super::Runtime; use crate::api::builders::sample::ValueBuilderTrait; use crate::api::key_expr::KeyExpr; +use crate::api::queryable::Query; +use crate::api::queryable::QueryInner; use crate::api::value::Value; use crate::encoding::Encoding; use crate::net::primitives::Primitives; use crate::payload::Payload; use crate::plugins::sealed::{self as plugins}; use crate::prelude::sync::SyncResolve; -use crate::queryable::Query; -use crate::queryable::QueryInner; use log::{error, trace}; use serde_json::json; use std::collections::HashMap; From 98ea8aeeeaa2588c04575d298ea0c73409c2bfe7 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 3 Apr 2024 14:58:07 +0200 Subject: [PATCH 114/357] handlers to api --- zenoh/src/api.rs | 1 + zenoh/src/{ => api}/handlers.rs | 0 zenoh/src/api/publication.rs | 4 ++-- zenoh/src/api/query.rs | 2 +- zenoh/src/api/queryable.rs | 4 ++-- zenoh/src/api/session.rs | 2 +- zenoh/src/api/subscriber.rs | 2 +- zenoh/src/lib.rs | 10 ++++++++-- zenoh/src/liveliness.rs | 4 ++-- zenoh/src/scouting.rs | 3 +-- 10 files changed, 19 insertions(+), 13 deletions(-) rename zenoh/src/{ => api}/handlers.rs (100%) diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index f7049b4106..96b733dd5c 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -14,6 +14,7 @@ pub(crate) mod builders; pub(crate) mod encoding; +pub(crate) mod handlers; pub(crate) mod info; pub(crate) mod key_expr; pub(crate) mod payload; diff --git a/zenoh/src/handlers.rs b/zenoh/src/api/handlers.rs similarity index 100% rename from zenoh/src/handlers.rs rename to zenoh/src/api/handlers.rs diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 3f528b41b7..3e650c5e39 100644 --- 
a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -22,7 +22,7 @@ use crate::net::primitives::Primitives; use crate::prelude::*; #[cfg(feature = "unstable")] use crate::{ - handlers::{Callback, DefaultHandler, IntoHandler}, + api::handlers::{Callback, DefaultHandler, IntoHandler}, Id, }; use std::future::Ready; @@ -1238,7 +1238,7 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { where CallbackMut: FnMut(MatchingStatus) + Send + Sync + 'static, { - self.callback(crate::handlers::locked(callback)) + self.callback(crate::api::handlers::locked(callback)) } /// Receive the MatchingStatuses for this listener with a [`Handler`](crate::prelude::IntoHandler). diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index b1dd5a5d73..1e749132da 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -13,10 +13,10 @@ // //! Query primitives. +use crate::api::handlers::{locked, Callback, DefaultHandler}; #[zenoh_macros::unstable] use crate::api::sample::Attachment; use crate::api::sample::QoSBuilder; -use crate::handlers::{locked, Callback, DefaultHandler}; use crate::prelude::*; use crate::Session; use std::collections::HashMap; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index db4df63183..df76b6441f 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -15,11 +15,11 @@ //! Queryable primitives. use crate::api::builders::sample::SampleBuilder; +use crate::api::encoding::Encoding; +use crate::api::handlers::{locked, DefaultHandler}; use crate::api::sample::{QoSBuilder, SourceInfo}; use crate::api::session::SessionRef; use crate::api::session::Undeclarable; -use crate::encoding::Encoding; -use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; use crate::Id; diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 519b2c0011..0bcb57d5e7 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -13,6 +13,7 @@ // use crate::admin; use crate::api::encoding::Encoding; +use crate::api::handlers::{Callback, DefaultHandler}; use crate::api::info::SessionInfo; use crate::api::key_expr::KeyExpr; use crate::api::key_expr::KeyExprInner; @@ -40,7 +41,6 @@ use crate::api::subscriber::SubscriberState; use crate::api::value::Value; use crate::config::Config; use crate::config::Notifier; -use crate::handlers::{Callback, DefaultHandler}; #[zenoh_macros::unstable] use crate::liveliness::{Liveliness, LivelinessTokenState}; use crate::net::primitives::Primitives; diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index 239ea488a9..c549542b3b 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -13,10 +13,10 @@ // //! Subscribing primitives. 
+use crate::api::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::api::key_expr::KeyExpr; use crate::api::sample::Sample; use crate::api::session::Undeclarable; -use crate::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::prelude::Locality; use crate::Id; use crate::{api::session::SessionRef, Result as ZResult}; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index a2df421de8..e8622045a7 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -81,8 +81,8 @@ extern crate zenoh_result; pub(crate) type Id = u32; +use api::handlers::DefaultHandler; use git_version::git_version; -use handlers::DefaultHandler; #[cfg(feature = "unstable")] use prelude::*; use scouting::ScoutBuilder; @@ -208,6 +208,13 @@ pub mod queryable { pub use crate::api::queryable::QueryableBuilder; } +pub mod handlers { + pub use crate::api::handlers::locked; + pub use crate::api::handlers::DefaultHandler; + pub use crate::api::handlers::IntoHandler; + pub use crate::api::handlers::RingBuffer; +} + mod admin; #[macro_use] @@ -216,7 +223,6 @@ pub(crate) mod net; pub use net::runtime; #[deprecated = "This module is now a separate crate. Use the crate directly for shorter compile-times"] pub use zenoh_config as config; -pub mod handlers; #[cfg(feature = "unstable")] pub mod liveliness; pub mod plugins; diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index 08145e36f1..dac046324d 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -22,11 +22,11 @@ use crate::{api::query::Reply, Id}; #[zenoh_macros::unstable] use { crate::{ + api::handlers::locked, + api::handlers::DefaultHandler, api::session::SessionRef, api::session::Undeclarable, api::subscriber::{Subscriber, SubscriberInner}, - handlers::locked, - handlers::DefaultHandler, prelude::*, }, std::convert::TryInto, diff --git a/zenoh/src/scouting.rs b/zenoh/src/scouting.rs index 49f2b4c01f..bfebc09d2c 100644 --- a/zenoh/src/scouting.rs +++ b/zenoh/src/scouting.rs @@ -11,9 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::handlers::{locked, Callback, DefaultHandler}; +use crate::api::handlers::{locked, Callback, DefaultHandler}; use crate::net::runtime::{orchestrator::Loop, Runtime}; - use futures::StreamExt; use std::{fmt, future::Ready, net::SocketAddr, ops::Deref}; use tokio::net::UdpSocket; From c8338f8748a49fea9a2b7c0e5db8df5fe499ff75 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 3 Apr 2024 17:48:42 +0200 Subject: [PATCH 115/357] scouting to api --- zenoh/src/{ => api}/scouting.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename zenoh/src/{ => api}/scouting.rs (100%) diff --git a/zenoh/src/scouting.rs b/zenoh/src/api/scouting.rs similarity index 100% rename from zenoh/src/scouting.rs rename to zenoh/src/api/scouting.rs From 36150cbf977ff66066f13450dba2e05274790a24 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 3 Apr 2024 17:48:54 +0200 Subject: [PATCH 116/357] scouting to api --- examples/examples/z_scout.rs | 3 +- zenoh/src/api.rs | 1 + zenoh/src/api/scouting.rs | 47 ++++++++++++++++++++++++++++-- zenoh/src/lib.rs | 56 ++++-------------------------------- zenoh/src/prelude.rs | 6 ++-- zenoh/tests/formatters.rs | 8 +++--- 6 files changed, 61 insertions(+), 60 deletions(-) diff --git a/examples/examples/z_scout.rs b/examples/examples/z_scout.rs index bc778cfc0f..11ed3a6fd8 100644 --- a/examples/examples/z_scout.rs +++ b/examples/examples/z_scout.rs @@ -14,6 +14,7 @@ use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh::scouting::WhatAmI; 
+use zenoh::scouting::scout; #[tokio::main] async fn main() { @@ -21,7 +22,7 @@ async fn main() { env_logger::init(); println!("Scouting..."); - let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, Config::default()) + let receiver = scout(WhatAmI::Peer | WhatAmI::Router, Config::default()) .res() .await .unwrap(); diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index 96b733dd5c..bc5b6a9301 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -22,6 +22,7 @@ pub(crate) mod publication; pub(crate) mod query; pub(crate) mod queryable; pub(crate) mod sample; +pub(crate) mod scouting; pub(crate) mod selector; pub(crate) mod session; pub(crate) mod subscriber; diff --git a/zenoh/src/api/scouting.rs b/zenoh/src/api/scouting.rs index bfebc09d2c..56f8d4c1a4 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -169,7 +169,7 @@ where { fn res_sync(self) -> ::To { let (callback, receiver) = self.handler.into_handler(); - scout(self.what, self.config?, callback).map(|scout| Scout { scout, receiver }) + _scout(self.what, self.config?, callback).map(|scout| Scout { scout, receiver }) } } @@ -294,7 +294,7 @@ impl Scout { } } -fn scout( +fn _scout( what: WhatAmIMatcher, config: zenoh_config::Config, callback: Callback<'static, Hello>, @@ -336,3 +336,46 @@ fn scout( } Ok(ScoutInner { stop_sender }) } + +/// Scout for routers and/or peers. +/// +/// [`scout`] spawns a task that periodically sends scout messages and waits for [`Hello`](crate::scouting::Hello) replies. +/// +/// Drop the returned [`Scout`](crate::scouting::Scout) to stop the scouting task. +/// +/// # Arguments +/// +/// * `what` - The kind of zenoh process to scout for +/// * `config` - The configuration [`Config`] to use for scouting +/// +/// # Examples +/// ```no_run +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::prelude::r#async::*; +/// use zenoh::scouting::WhatAmI; +/// +/// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) +/// .res() +/// .await +/// .unwrap(); +/// while let Ok(hello) = receiver.recv_async().await { +/// println!("{}", hello); +/// } +/// # } +/// ``` +pub fn scout, TryIntoConfig>( + what: I, + config: TryIntoConfig, +) -> ScoutBuilder +where + TryIntoConfig: std::convert::TryInto + Send + 'static, + >::Error: + Into, +{ + ScoutBuilder { + what: what.into(), + config: config.try_into().map_err(|e| e.into()), + handler: DefaultHandler, + } +} diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index e8622045a7..1129363c43 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -81,13 +81,9 @@ extern crate zenoh_result; pub(crate) type Id = u32; -use api::handlers::DefaultHandler; use git_version::git_version; #[cfg(feature = "unstable")] use prelude::*; -use scouting::ScoutBuilder; -pub use zenoh_macros::{ke, kedefine, keformat, kewrite}; -use zenoh_protocol::core::WhatAmIMatcher; use zenoh_util::concat_enabled_features; /// A zenoh error. @@ -215,6 +211,12 @@ pub mod handlers { pub use crate::api::handlers::RingBuffer; } +pub mod scouting { + pub use crate::api::scouting::scout; + pub use crate::api::scouting::ScoutBuilder; + pub use crate::api::scouting::WhatAmI; +} + mod admin; #[macro_use] @@ -250,49 +252,3 @@ pub mod time { Timestamp::new(now.into(), TimestampId::try_from([1]).unwrap()) } } - -/// Scouting primitives. -pub mod scouting; - -/// Scout for routers and/or peers. -/// -/// [`scout`] spawns a task that periodically sends scout messages and waits for [`Hello`](crate::scouting::Hello) replies. 
-/// -/// Drop the returned [`Scout`](crate::scouting::Scout) to stop the scouting task. -/// -/// # Arguments -/// -/// * `what` - The kind of zenoh process to scout for -/// * `config` - The configuration [`Config`] to use for scouting -/// -/// # Examples -/// ```no_run -/// # #[tokio::main] -/// # async fn main() { -/// use zenoh::prelude::r#async::*; -/// use zenoh::scouting::WhatAmI; -/// -/// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) -/// .res() -/// .await -/// .unwrap(); -/// while let Ok(hello) = receiver.recv_async().await { -/// println!("{}", hello); -/// } -/// # } -/// ``` -pub fn scout, TryIntoConfig>( - what: I, - config: TryIntoConfig, -) -> ScoutBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: - Into, -{ - ScoutBuilder { - what: what.into(), - config: config.try_into().map_err(|e| e.into()), - handler: DefaultHandler, - } -} diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 61c21b9167..317cad3a68 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -41,7 +41,7 @@ pub(crate) mod common { pub use crate::selector::{Parameter, Parameters, Selector}; pub use crate::session::{Session, SessionDeclarations}; - pub use crate::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; + pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; pub use crate::api::encoding::Encoding; pub use crate::api::value::Value; @@ -56,9 +56,9 @@ pub(crate) mod common { #[cfg(not(feature = "unstable"))] pub(crate) use crate::sample::Locality; - pub use crate::publication::Priority; + pub use crate::api::publication::Priority; #[zenoh_macros::unstable] - pub use crate::publication::PublisherDeclarations; + pub use crate::api::publication::PublisherDeclarations; pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; pub use crate::api::builders::sample::{ diff --git a/zenoh/tests/formatters.rs b/zenoh/tests/formatters.rs index 22600b6cc0..22defaab05 100644 --- a/zenoh/tests/formatters.rs +++ b/zenoh/tests/formatters.rs @@ -13,11 +13,11 @@ // #[test] fn reuse() { - zenoh::kedefine!( + zenoh::key_expr::kedefine!( pub gkeys: "zenoh/${group:*}/${member:*}", ); let mut formatter = gkeys::formatter(); - let k1 = zenoh::keformat!(formatter, group = "foo", member = "bar").unwrap(); + let k1 = zenoh::key_expr::keformat!(formatter, group = "foo", member = "bar").unwrap(); assert_eq!(dbg!(k1).as_str(), "zenoh/foo/bar"); formatter.set("member", "*").unwrap(); @@ -29,8 +29,8 @@ fn reuse() { let k2 = dbg!(&mut formatter).build().unwrap(); assert_eq!(dbg!(k2).as_str(), "zenoh/foo/*"); - let k3 = zenoh::keformat!(formatter, group = "foo", member = "*").unwrap(); + let k3 = zenoh::key_expr::keformat!(formatter, group = "foo", member = "*").unwrap(); assert_eq!(dbg!(k3).as_str(), "zenoh/foo/*"); - zenoh::keformat!(formatter, group = "**", member = "**").unwrap_err(); + zenoh::key_expr::keformat!(formatter, group = "**", member = "**").unwrap_err(); } From b9166141baa645dbdc1a8c3f51cb7efef3ef47d0 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 3 Apr 2024 18:11:34 +0200 Subject: [PATCH 117/357] liveliness to api --- zenoh/src/api.rs | 1 + zenoh/src/{ => api}/liveliness.rs | 0 zenoh/src/api/session.rs | 14 +++++++------- zenoh/src/lib.rs | 6 +++++- 4 files changed, 13 insertions(+), 8 deletions(-) rename zenoh/src/{ => api}/liveliness.rs (100%) diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index bc5b6a9301..d06acb8f96 100644 --- a/zenoh/src/api.rs +++ 
b/zenoh/src/api.rs @@ -17,6 +17,7 @@ pub(crate) mod encoding; pub(crate) mod handlers; pub(crate) mod info; pub(crate) mod key_expr; +pub(crate) mod liveliness; pub(crate) mod payload; pub(crate) mod publication; pub(crate) mod query; diff --git a/zenoh/src/liveliness.rs b/zenoh/src/api/liveliness.rs similarity index 100% rename from zenoh/src/liveliness.rs rename to zenoh/src/api/liveliness.rs diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 0bcb57d5e7..407a6256c3 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -17,6 +17,8 @@ use crate::api::handlers::{Callback, DefaultHandler}; use crate::api::info::SessionInfo; use crate::api::key_expr::KeyExpr; use crate::api::key_expr::KeyExprInner; +#[zenoh_macros::unstable] +use crate::api::liveliness::{Liveliness, LivelinessTokenState}; use crate::api::publication::MatchingListenerState; use crate::api::publication::MatchingStatus; use crate::api::publication::PublicationBuilder; @@ -41,8 +43,6 @@ use crate::api::subscriber::SubscriberState; use crate::api::value::Value; use crate::config::Config; use crate::config::Notifier; -#[zenoh_macros::unstable] -use crate::liveliness::{Liveliness, LivelinessTokenState}; use crate::net::primitives::Primitives; use crate::net::routing::dispatcher::face::Face; use crate::net::runtime::Runtime; @@ -1030,7 +1030,7 @@ impl Session { let declared_sub = origin != Locality::SessionLocal && !key_expr .as_str() - .starts_with(crate::liveliness::PREFIX_LIVELINESS); + .starts_with(crate::api::liveliness::PREFIX_LIVELINESS); let declared_sub = declared_sub @@ -1160,7 +1160,7 @@ impl Session { && !sub_state .key_expr .as_str() - .starts_with(crate::liveliness::PREFIX_LIVELINESS); + .starts_with(crate::api::liveliness::PREFIX_LIVELINESS); if send_forget { // Note: there might be several Subscribers on the same KeyExpr. // Before calling forget_subscriber(key_expr), check if this was the last one. @@ -1270,7 +1270,7 @@ impl Session { let mut state = zwrite!(self.state); log::trace!("declare_liveliness({:?})", key_expr); let id = self.runtime.next_id(); - let key_expr = KeyExpr::from(*crate::liveliness::KE_PREFIX_LIVELINESS / key_expr); + let key_expr = KeyExpr::from(*crate::api::liveliness::KE_PREFIX_LIVELINESS / key_expr); let tok_state = Arc::new(LivelinessTokenState { id, key_expr: key_expr.clone().into_owned(), @@ -2018,7 +2018,7 @@ impl Primitives for Session { if expr .as_str() - .starts_with(crate::liveliness::PREFIX_LIVELINESS) + .starts_with(crate::api::liveliness::PREFIX_LIVELINESS) { drop(state); self.handle_data( @@ -2047,7 +2047,7 @@ impl Primitives for Session { if expr .as_str() - .starts_with(crate::liveliness::PREFIX_LIVELINESS) + .starts_with(crate::api::liveliness::PREFIX_LIVELINESS) { drop(state); let data_info = DataInfo { diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 1129363c43..e15e5fdd33 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -217,6 +217,11 @@ pub mod scouting { pub use crate::api::scouting::WhatAmI; } +pub mod liveliness { + pub use crate::api::liveliness::Liveliness; + pub use crate::api::liveliness::LivelinessSubscriberBuilder; +} + mod admin; #[macro_use] @@ -226,7 +231,6 @@ pub use net::runtime; #[deprecated = "This module is now a separate crate. 
Use the crate directly for shorter compile-times"] pub use zenoh_config as config; #[cfg(feature = "unstable")] -pub mod liveliness; pub mod plugins; pub mod prelude; #[cfg(feature = "shared-memory")] From a92da3584934b89b71fc0f12450eb5b676e482a1 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 3 Apr 2024 22:09:52 +0200 Subject: [PATCH 118/357] time to api --- zenoh/src/api.rs | 1 + zenoh/src/api/sample.rs | 2 +- zenoh/src/api/time.rs | 26 ++++++++++++++++++++++++++ zenoh/src/lib.rs | 22 +++++----------------- 4 files changed, 33 insertions(+), 18 deletions(-) create mode 100644 zenoh/src/api/time.rs diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index d06acb8f96..19ea8afaf2 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -27,4 +27,5 @@ pub(crate) mod scouting; pub(crate) mod selector; pub(crate) mod session; pub(crate) mod subscriber; +pub(crate) mod time; pub(crate) mod value; diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 4a4bc934a8..148b61b9e2 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -18,7 +18,6 @@ use crate::api::encoding::Encoding; use crate::api::key_expr::KeyExpr; use crate::api::value::Value; use crate::payload::Payload; -use crate::time::Timestamp; use crate::Priority; #[zenoh_macros::unstable] pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; @@ -26,6 +25,7 @@ pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; use serde::Serialize; use std::{convert::TryFrom, fmt}; use zenoh_protocol::core::EntityGlobalId; +use zenoh_protocol::core::Timestamp; use zenoh_protocol::network::declare::ext::QoSType; use zenoh_protocol::{core::CongestionControl, zenoh}; diff --git a/zenoh/src/api/time.rs b/zenoh/src/api/time.rs new file mode 100644 index 0000000000..eeeebdc6ba --- /dev/null +++ b/zenoh/src/api/time.rs @@ -0,0 +1,26 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::convert::TryFrom; + +use zenoh_protocol::core::{Timestamp, TimestampId, NTP64}; + +/// Generates a reception [`Timestamp`] with id=0x01. +/// This operation should be called if a timestamp is required for an incoming [`zenoh::Sample`](crate::Sample) +/// that doesn't contain any timestamp. +pub fn new_reception_timestamp() -> Timestamp { + use std::time::{SystemTime, UNIX_EPOCH}; + + let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); + Timestamp::new(now.into(), TimestampId::try_from([1]).unwrap()) +} diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index e15e5fdd33..40c7a942bc 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -222,6 +222,11 @@ pub mod liveliness { pub use crate::api::liveliness::LivelinessSubscriberBuilder; } +pub mod time { + pub use crate::api::time::new_reception_timestamp; + pub use zenoh_protocol::core::{Timestamp, TimestampId, NTP64}; +} + mod admin; #[macro_use] @@ -239,20 +244,3 @@ pub use zenoh_shm as shm; /// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate /// reading and writing data. pub use zenoh_buffers as buffers; - -/// Time related types and functions. 
-pub mod time { - use std::convert::TryFrom; - - pub use zenoh_protocol::core::{Timestamp, TimestampId, NTP64}; - - /// Generates a reception [`Timestamp`] with id=0x01. - /// This operation should be called if a timestamp is required for an incoming [`zenoh::Sample`](crate::Sample) - /// that doesn't contain any timestamp. - pub fn new_reception_timestamp() -> Timestamp { - use std::time::{SystemTime, UNIX_EPOCH}; - - let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); - Timestamp::new(now.into(), TimestampId::try_from([1]).unwrap()) - } -} From 60a9003cecb4592099ef3b51f4279a5c396eae22 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 3 Apr 2024 22:16:02 +0200 Subject: [PATCH 119/357] admin to api --- zenoh/src/api.rs | 1 + zenoh/src/{ => api}/admin.rs | 0 zenoh/src/api/session.rs | 2 +- zenoh/src/api/time.rs | 2 +- zenoh/src/lib.rs | 3 --- 5 files changed, 3 insertions(+), 5 deletions(-) rename zenoh/src/{ => api}/admin.rs (100%) diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index 19ea8afaf2..1af7da37c5 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // +pub(crate) mod admin; pub(crate) mod builders; pub(crate) mod encoding; pub(crate) mod handlers; diff --git a/zenoh/src/admin.rs b/zenoh/src/api/admin.rs similarity index 100% rename from zenoh/src/admin.rs rename to zenoh/src/api/admin.rs diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 407a6256c3..83a57ce260 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::admin; +use crate::api::admin; use crate::api::encoding::Encoding; use crate::api::handlers::{Callback, DefaultHandler}; use crate::api::info::SessionInfo; diff --git a/zenoh/src/api/time.rs b/zenoh/src/api/time.rs index eeeebdc6ba..cbdabe3a7e 100644 --- a/zenoh/src/api/time.rs +++ b/zenoh/src/api/time.rs @@ -13,7 +13,7 @@ // use std::convert::TryFrom; -use zenoh_protocol::core::{Timestamp, TimestampId, NTP64}; +use zenoh_protocol::core::{Timestamp, TimestampId}; /// Generates a reception [`Timestamp`] with id=0x01. 
/// This operation should be called if a timestamp is required for an incoming [`zenoh::Sample`](crate::Sample) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 40c7a942bc..3a5c358ae2 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -227,9 +227,6 @@ pub mod time { pub use zenoh_protocol::core::{Timestamp, TimestampId, NTP64}; } -mod admin; -#[macro_use] - mod api; pub(crate) mod net; pub use net::runtime; From e816f4efdc8591ce4db6ea9c88cc2109e1c037d4 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 4 Apr 2024 11:36:02 +0200 Subject: [PATCH 120/357] Add unicast open/close time tests (#898) * Add unicast open/close time * Remove unused import * Add print to tests --- io/zenoh-transport/tests/unicast_time.rs | 521 +++++++++++++++++++++++ 1 file changed, 521 insertions(+) create mode 100644 io/zenoh-transport/tests/unicast_time.rs diff --git a/io/zenoh-transport/tests/unicast_time.rs b/io/zenoh-transport/tests/unicast_time.rs new file mode 100644 index 0000000000..75d3ae1d98 --- /dev/null +++ b/io/zenoh-transport/tests/unicast_time.rs @@ -0,0 +1,521 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::{ + convert::TryFrom, + sync::Arc, + time::{Duration, Instant}, +}; +use zenoh_core::ztimeout; +use zenoh_link::EndPoint; +use zenoh_protocol::core::{WhatAmI, ZenohId}; +use zenoh_result::ZResult; +use zenoh_transport::{ + multicast::TransportMulticast, + unicast::{test_helpers::make_transport_manager_builder, TransportUnicast}, + DummyTransportPeerEventHandler, TransportEventHandler, TransportManager, + TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, +}; + +const TIMEOUT: Duration = Duration::from_secs(60); +const TIMEOUT_EXPECTED: Duration = Duration::from_secs(5); +const SLEEP: Duration = Duration::from_millis(100); + +macro_rules! 
ztimeout_expected { + ($f:expr) => { + tokio::time::timeout(TIMEOUT_EXPECTED, $f).await.unwrap() + }; +} + +#[cfg(test)] +#[derive(Default)] +struct SHRouterOpenClose; + +impl TransportEventHandler for SHRouterOpenClose { + fn new_unicast( + &self, + _peer: TransportPeer, + _transport: TransportUnicast, + ) -> ZResult> { + Ok(Arc::new(DummyTransportPeerEventHandler)) + } + + fn new_multicast( + &self, + _transport: TransportMulticast, + ) -> ZResult> { + panic!(); + } +} + +// Transport Handler for the client +struct SHClientOpenClose {} + +impl SHClientOpenClose { + fn new() -> Self { + Self {} + } +} + +impl TransportEventHandler for SHClientOpenClose { + fn new_unicast( + &self, + _peer: TransportPeer, + _transport: TransportUnicast, + ) -> ZResult> { + Ok(Arc::new(DummyTransportPeerEventHandler)) + } + + fn new_multicast( + &self, + _transport: TransportMulticast, + ) -> ZResult> { + panic!(); + } +} + +async fn time_transport( + listen_endpoint: &EndPoint, + connect_endpoint: &EndPoint, + lowlatency_transport: bool, +) { + if lowlatency_transport { + println!(">>> Low latency transport"); + } else { + println!(">>> Universal transport"); + } + /* [ROUTER] */ + let router_id = ZenohId::try_from([1]).unwrap(); + + let router_handler = Arc::new(SHRouterOpenClose); + // Create the router transport manager + let unicast = make_transport_manager_builder( + #[cfg(feature = "transport_multilink")] + 1, + #[cfg(feature = "shared-memory")] + false, + lowlatency_transport, + ) + .max_sessions(1); + let router_manager = TransportManager::builder() + .whatami(WhatAmI::Router) + .zid(router_id) + .unicast(unicast) + .build(router_handler.clone()) + .unwrap(); + + /* [CLIENT] */ + let client01_id = ZenohId::try_from([2]).unwrap(); + + // Create the transport transport manager for the first client + let unicast = make_transport_manager_builder( + #[cfg(feature = "transport_multilink")] + 1, + #[cfg(feature = "shared-memory")] + false, + lowlatency_transport, + ) + .max_sessions(1); + let client01_manager = TransportManager::builder() + .whatami(WhatAmI::Client) + .zid(client01_id) + .unicast(unicast) + .build(Arc::new(SHClientOpenClose::new())) + .unwrap(); + + /* [1] */ + // Add the locator on the router + let start = Instant::now(); + ztimeout!(router_manager.add_listener(listen_endpoint.clone())).unwrap(); + println!("Add listener {}: {:#?}", listen_endpoint, start.elapsed()); + + // Open a transport from the client to the router + let start = Instant::now(); + let c_ses1 = + ztimeout_expected!(client01_manager.open_transport_unicast(connect_endpoint.clone())) + .unwrap(); + println!( + "Open transport {}: {:#?}", + connect_endpoint, + start.elapsed() + ); + + // Verify that the transport has been open on the router + ztimeout!(async { + loop { + let transports = ztimeout!(router_manager.get_transports_unicast()); + let s = transports + .iter() + .find(|s| s.get_zid().unwrap() == client01_id); + + match s { + Some(s) => { + let links = s.get_links().unwrap(); + assert_eq!(links.len(), 1); + break; + } + None => tokio::time::sleep(SLEEP).await, + } + } + }); + + /* [2] */ + // Close the open transport on the client + let start = Instant::now(); + ztimeout!(c_ses1.close()).unwrap(); + println!( + "Close transport {}: {:#?}", + connect_endpoint, + start.elapsed() + ); + + // Verify that the transport has been closed also on the router + ztimeout!(async { + loop { + let transports = ztimeout!(router_manager.get_transports_unicast()); + let index = transports + .iter() + .find(|s| s.get_zid().unwrap() 
== client01_id); + if index.is_none() { + break; + } + tokio::time::sleep(SLEEP).await; + } + }); + + /* [3] */ + let start = Instant::now(); + ztimeout!(router_manager.del_listener(listen_endpoint)).unwrap(); + println!( + "Delete listener {}: {:#?}", + listen_endpoint, + start.elapsed() + ); + + ztimeout!(async { + while !router_manager.get_listeners().await.is_empty() { + tokio::time::sleep(SLEEP).await; + } + }); + + // Wait a little bit + tokio::time::sleep(SLEEP).await; + + ztimeout!(router_manager.close()); + ztimeout!(client01_manager.close()); + + // Wait a little bit + tokio::time::sleep(SLEEP).await; +} + +async fn time_universal_transport(endpoint: &EndPoint) { + time_transport(endpoint, endpoint, false).await +} + +async fn time_lowlatency_transport(endpoint: &EndPoint) { + time_transport(endpoint, endpoint, true).await +} + +#[cfg(feature = "transport_tcp")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_tcp_only() { + let _ = env_logger::try_init(); + let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13000).parse().unwrap(); + time_universal_transport(&endpoint).await; +} + +#[cfg(feature = "transport_tcp")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_tcp_only_with_lowlatency_transport() { + let _ = env_logger::try_init(); + let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13100).parse().unwrap(); + time_lowlatency_transport(&endpoint).await; +} + +#[cfg(feature = "transport_udp")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_udp_only() { + let _ = env_logger::try_init(); + let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13010).parse().unwrap(); + time_universal_transport(&endpoint).await; +} + +#[cfg(feature = "transport_udp")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_udp_only_with_lowlatency_transport() { + let _ = env_logger::try_init(); + let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13110).parse().unwrap(); + time_lowlatency_transport(&endpoint).await; +} + +#[cfg(feature = "transport_ws")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_ws_only() { + let _ = env_logger::try_init(); + let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13020).parse().unwrap(); + time_universal_transport(&endpoint).await; +} + +#[cfg(feature = "transport_ws")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_ws_only_with_lowlatency_transport() { + let _ = env_logger::try_init(); + let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13120).parse().unwrap(); + time_lowlatency_transport(&endpoint).await; +} + +#[cfg(feature = "transport_unixpipe")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_unixpipe_only() { + let _ = env_logger::try_init(); + let endpoint: EndPoint = "unixpipe/time_unixpipe_only".parse().unwrap(); + time_universal_transport(&endpoint).await; +} + +#[cfg(feature = "transport_unixpipe")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_unixpipe_only_with_lowlatency_transport() { + let _ = env_logger::try_init(); + let endpoint: EndPoint = "unixpipe/time_unixpipe_only_with_lowlatency_transport" + .parse() + .unwrap(); + time_lowlatency_transport(&endpoint).await; +} + +#[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn 
time_unix_only() { + let _ = env_logger::try_init(); + let f1 = "zenoh-test-unix-socket-9.sock"; + let _ = std::fs::remove_file(f1); + let endpoint: EndPoint = format!("unixsock-stream/{f1}").parse().unwrap(); + time_universal_transport(&endpoint).await; + let _ = std::fs::remove_file(f1); + let _ = std::fs::remove_file(format!("{f1}.lock")); +} + +#[cfg(feature = "transport_tls")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_tls_only() { + use zenoh_link::tls::config::*; + + let _ = env_logger::try_init(); + // NOTE: this an auto-generated pair of certificate and key. + // The target domain is localhost, so it has no real + // mapping to any existing domain. The certificate and key + // have been generated using: https://github.com/jsha/minica + let key = "-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAsfqAuhElN4HnyeqLovSd4Qe+nNv5AwCjSO+HFiF30x3vQ1Hi +qRA0UmyFlSqBnFH3TUHm4Jcad40QfrX8f11NKGZdpvKHsMYqYjZnYkRFGS2s4fQy +aDbV5M06s3UDX8ETPgY41Y8fCKTSVdi9iHkwcVrXMxUu4IBBx0C1r2GSo3gkIBnU +cELdFdaUOSbdCipJhbnkwixEr2h7PXxwba7SIZgZtRaQWak1VE9b716qe3iMuMha +Efo/UoFmeZCPu5spfwaOZsnCsxRPk2IjbzlsHTJ09lM9wmbEFHBMVAXejLTk++Sr +Xt8jASZhNen/2GzyLQNAquGn98lCMQ6SsE9vLQIDAQABAoIBAGQkKggHm6Q20L+4 +2+bNsoOqguLplpvM4RMpyx11qWE9h6GeUmWD+5yg+SysJQ9aw0ZSHWEjRD4ePji9 +lxvm2IIxzuIftp+NcM2gBN2ywhpfq9XbO/2NVR6PJ0dQQJzBG12bzKDFDdYkP0EU +WdiPL+WoEkvo0F57bAd77n6G7SZSgxYekBF+5S6rjbu5I1cEKW+r2vLehD4uFCVX +Q0Tu7TyIOE1KJ2anRb7ZXVUaguNj0/Er7EDT1+wN8KJKvQ1tYGIq/UUBtkP9nkOI +9XJd25k6m5AQPDddzd4W6/5+M7kjyVPi3CsQcpBPss6ueyecZOMaKqdWAHeEyaak +r67TofUCgYEA6GBa+YkRvp0Ept8cd5mh4gCRM8wUuhtzTQnhubCPivy/QqMWScdn +qD0OiARLAsqeoIfkAVgyqebVnxwTrKTvWe0JwpGylEVWQtpGz3oHgjST47yZxIiY +CSAaimi2CYnJZ+QB2oBkFVwNCuXdPEGX6LgnOGva19UKrm6ONsy6V9MCgYEAxBJu +fu4dGXZreARKEHa/7SQjI9ayAFuACFlON/EgSlICzQyG/pumv1FsMEiFrv6w7PRj +4AGqzyzGKXWVDRMrUNVeGPSKJSmlPGNqXfPaXRpVEeB7UQhAs5wyMrWDl8jEW7Ih +XcWhMLn1f/NOAKyrSDSEaEM+Nuu+xTifoAghvP8CgYEAlta9Fw+nihDIjT10cBo0 +38w4dOP7bFcXQCGy+WMnujOYPzw34opiue1wOlB3FIfL8i5jjY/fyzPA5PhHuSCT +Ec9xL3B9+AsOFHU108XFi/pvKTwqoE1+SyYgtEmGKKjdKOfzYA9JaCgJe1J8inmV +jwXCx7gTJVjwBwxSmjXIm+sCgYBQF8NhQD1M0G3YCdCDZy7BXRippCL0OGxVfL2R +5oKtOVEBl9NxH/3+evE5y/Yn5Mw7Dx3ZPHUcygpslyZ6v9Da5T3Z7dKcmaVwxJ+H +n3wcugv0EIHvOPLNK8npovINR6rGVj6BAqD0uZHKYYYEioQxK5rGyGkaoDQ+dgHm +qku12wKBgQDem5FvNp5iW7mufkPZMqf3sEGtu612QeqejIPFM1z7VkUgetsgPBXD +tYsqC2FtWzY51VOEKNpnfH7zH5n+bjoI9nAEAW63TK9ZKkr2hRGsDhJdGzmLfQ7v +F6/CuIw9EsAq6qIB8O88FXQqald+BZOx6AzB8Oedsz/WtMmIEmr/+Q== +-----END RSA PRIVATE KEY-----"; + + let cert = "-----BEGIN CERTIFICATE----- +MIIDLjCCAhagAwIBAgIIeUtmIdFQznMwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMxOFoYDzIxMjMw +MzA2MTYwMzE4WjAUMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCx+oC6ESU3gefJ6oui9J3hB76c2/kDAKNI74cWIXfT +He9DUeKpEDRSbIWVKoGcUfdNQebglxp3jRB+tfx/XU0oZl2m8oewxipiNmdiREUZ +Lazh9DJoNtXkzTqzdQNfwRM+BjjVjx8IpNJV2L2IeTBxWtczFS7ggEHHQLWvYZKj +eCQgGdRwQt0V1pQ5Jt0KKkmFueTCLESvaHs9fHBtrtIhmBm1FpBZqTVUT1vvXqp7 +eIy4yFoR+j9SgWZ5kI+7myl/Bo5mycKzFE+TYiNvOWwdMnT2Uz3CZsQUcExUBd6M +tOT75Kte3yMBJmE16f/YbPItA0Cq4af3yUIxDpKwT28tAgMBAAGjdjB0MA4GA1Ud +DwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0T +AQH/BAIwADAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzAUBgNVHREE +DTALgglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEBAG/POnBob0S7iYwsbtI2 +3LTTbRnmseIErtJuJmI9yYzgVIm6sUSKhlIUfAIm4rfRuzE94KFeWR2w9RabxOJD +wjYLLKvQ6rFY5g2AV/J0TwDjYuq0absdaDPZ8MKJ+/lpGYK3Te+CTOfq5FJRFt1q +GOkXAxnNpGg0obeRWRKFiAMHbcw6a8LIMfRjCooo3+uSQGsbVzGxSB4CYo720KcC 
+9vB1K9XALwzoqCewP4aiQsMY1GWpAmzXJftY3w+lka0e9dBYcdEdOqxSoZb5OBBZ +p5e60QweRuJsb60aUaCG8HoICevXYK2fFqCQdlb5sIqQqXyN2K6HuKAFywsjsGyJ +abY= +-----END CERTIFICATE-----"; + + // Configure the client + let ca = "-----BEGIN CERTIFICATE----- +MIIDSzCCAjOgAwIBAgIIB42n1ZIkOakwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMwN1oYDzIxMjMw +MzA2MTYwMzA3WjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSAwNzhkYTcwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIuCq24O4P4Aep5vAVlrIQ7P8+ +uWWgcHIFYa02TmhBUB/hjo0JANCQvAtpVNuQ8NyKPlqnnq1cttePbSYVeA0rrnOs +DcfySAiyGBEY9zMjFfHJtH1wtrPcJEU8XIEY3xUlrAJE2CEuV9dVYgfEEydnvgLc +8Ug0WXSiARjqbnMW3l8jh6bYCp/UpL/gSM4mxdKrgpfyPoweGhlOWXc3RTS7cqM9 +T25acURGOSI6/g8GF0sNE4VZmUvHggSTmsbLeXMJzxDWO+xVehRmbQx3IkG7u++b +QdRwGIJcDNn7zHlDMHtQ0Z1DBV94fZNBwCULhCBB5g20XTGw//S7Fj2FPwyhAgMB +AAGjgYYwgYMwDgYDVR0PAQH/BAQDAgKEMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggr +BgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBTWfAmQ/BUIQm/9 +/llJJs2jUMWzGzAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzANBgkq +hkiG9w0BAQsFAAOCAQEAvtcZFAELKiTuOiAeYts6zeKxc+nnHCzayDeD/BDCbxGJ +e1n+xdHjLtWGd+/Anc+fvftSYBPTFQqCi84lPiUIln5z/rUxE+ke81hNPIfw2obc +yIg87xCabQpVyEh8s+MV+7YPQ1+fH4FuSi2Fck1FejxkVqN2uOZPvOYUmSTsaVr1 +8SfRnwJNZ9UMRPM2bD4Jkvj0VcL42JM3QkOClOzYW4j/vll2cSs4kx7er27cIoo1 +Ck0v2xSPAiVjg6w65rUQeW6uB5m0T2wyj+wm0At8vzhZPlgS1fKhcmT2dzOq3+oN +R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== +-----END CERTIFICATE-----"; + + let mut endpoint: EndPoint = format!("tls/localhost:{}", 13030).parse().unwrap(); + endpoint + .config_mut() + .extend( + [ + (TLS_ROOT_CA_CERTIFICATE_RAW, ca), + (TLS_SERVER_PRIVATE_KEY_RAW, key), + (TLS_SERVER_CERTIFICATE_RAW, cert), + ] + .iter() + .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + ) + .unwrap(); + + time_universal_transport(&endpoint).await; +} + +#[cfg(feature = "transport_quic")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_quic_only() { + use zenoh_link::quic::config::*; + + // NOTE: this an auto-generated pair of certificate and key. + // The target domain is localhost, so it has no real + // mapping to any existing domain. 
The certificate and key + // have been generated using: https://github.com/jsha/minica + let key = "-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAsfqAuhElN4HnyeqLovSd4Qe+nNv5AwCjSO+HFiF30x3vQ1Hi +qRA0UmyFlSqBnFH3TUHm4Jcad40QfrX8f11NKGZdpvKHsMYqYjZnYkRFGS2s4fQy +aDbV5M06s3UDX8ETPgY41Y8fCKTSVdi9iHkwcVrXMxUu4IBBx0C1r2GSo3gkIBnU +cELdFdaUOSbdCipJhbnkwixEr2h7PXxwba7SIZgZtRaQWak1VE9b716qe3iMuMha +Efo/UoFmeZCPu5spfwaOZsnCsxRPk2IjbzlsHTJ09lM9wmbEFHBMVAXejLTk++Sr +Xt8jASZhNen/2GzyLQNAquGn98lCMQ6SsE9vLQIDAQABAoIBAGQkKggHm6Q20L+4 +2+bNsoOqguLplpvM4RMpyx11qWE9h6GeUmWD+5yg+SysJQ9aw0ZSHWEjRD4ePji9 +lxvm2IIxzuIftp+NcM2gBN2ywhpfq9XbO/2NVR6PJ0dQQJzBG12bzKDFDdYkP0EU +WdiPL+WoEkvo0F57bAd77n6G7SZSgxYekBF+5S6rjbu5I1cEKW+r2vLehD4uFCVX +Q0Tu7TyIOE1KJ2anRb7ZXVUaguNj0/Er7EDT1+wN8KJKvQ1tYGIq/UUBtkP9nkOI +9XJd25k6m5AQPDddzd4W6/5+M7kjyVPi3CsQcpBPss6ueyecZOMaKqdWAHeEyaak +r67TofUCgYEA6GBa+YkRvp0Ept8cd5mh4gCRM8wUuhtzTQnhubCPivy/QqMWScdn +qD0OiARLAsqeoIfkAVgyqebVnxwTrKTvWe0JwpGylEVWQtpGz3oHgjST47yZxIiY +CSAaimi2CYnJZ+QB2oBkFVwNCuXdPEGX6LgnOGva19UKrm6ONsy6V9MCgYEAxBJu +fu4dGXZreARKEHa/7SQjI9ayAFuACFlON/EgSlICzQyG/pumv1FsMEiFrv6w7PRj +4AGqzyzGKXWVDRMrUNVeGPSKJSmlPGNqXfPaXRpVEeB7UQhAs5wyMrWDl8jEW7Ih +XcWhMLn1f/NOAKyrSDSEaEM+Nuu+xTifoAghvP8CgYEAlta9Fw+nihDIjT10cBo0 +38w4dOP7bFcXQCGy+WMnujOYPzw34opiue1wOlB3FIfL8i5jjY/fyzPA5PhHuSCT +Ec9xL3B9+AsOFHU108XFi/pvKTwqoE1+SyYgtEmGKKjdKOfzYA9JaCgJe1J8inmV +jwXCx7gTJVjwBwxSmjXIm+sCgYBQF8NhQD1M0G3YCdCDZy7BXRippCL0OGxVfL2R +5oKtOVEBl9NxH/3+evE5y/Yn5Mw7Dx3ZPHUcygpslyZ6v9Da5T3Z7dKcmaVwxJ+H +n3wcugv0EIHvOPLNK8npovINR6rGVj6BAqD0uZHKYYYEioQxK5rGyGkaoDQ+dgHm +qku12wKBgQDem5FvNp5iW7mufkPZMqf3sEGtu612QeqejIPFM1z7VkUgetsgPBXD +tYsqC2FtWzY51VOEKNpnfH7zH5n+bjoI9nAEAW63TK9ZKkr2hRGsDhJdGzmLfQ7v +F6/CuIw9EsAq6qIB8O88FXQqald+BZOx6AzB8Oedsz/WtMmIEmr/+Q== +-----END RSA PRIVATE KEY-----"; + + let cert = "-----BEGIN CERTIFICATE----- +MIIDLjCCAhagAwIBAgIIeUtmIdFQznMwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMxOFoYDzIxMjMw +MzA2MTYwMzE4WjAUMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCx+oC6ESU3gefJ6oui9J3hB76c2/kDAKNI74cWIXfT +He9DUeKpEDRSbIWVKoGcUfdNQebglxp3jRB+tfx/XU0oZl2m8oewxipiNmdiREUZ +Lazh9DJoNtXkzTqzdQNfwRM+BjjVjx8IpNJV2L2IeTBxWtczFS7ggEHHQLWvYZKj +eCQgGdRwQt0V1pQ5Jt0KKkmFueTCLESvaHs9fHBtrtIhmBm1FpBZqTVUT1vvXqp7 +eIy4yFoR+j9SgWZ5kI+7myl/Bo5mycKzFE+TYiNvOWwdMnT2Uz3CZsQUcExUBd6M +tOT75Kte3yMBJmE16f/YbPItA0Cq4af3yUIxDpKwT28tAgMBAAGjdjB0MA4GA1Ud +DwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0T +AQH/BAIwADAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzAUBgNVHREE +DTALgglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEBAG/POnBob0S7iYwsbtI2 +3LTTbRnmseIErtJuJmI9yYzgVIm6sUSKhlIUfAIm4rfRuzE94KFeWR2w9RabxOJD +wjYLLKvQ6rFY5g2AV/J0TwDjYuq0absdaDPZ8MKJ+/lpGYK3Te+CTOfq5FJRFt1q +GOkXAxnNpGg0obeRWRKFiAMHbcw6a8LIMfRjCooo3+uSQGsbVzGxSB4CYo720KcC +9vB1K9XALwzoqCewP4aiQsMY1GWpAmzXJftY3w+lka0e9dBYcdEdOqxSoZb5OBBZ +p5e60QweRuJsb60aUaCG8HoICevXYK2fFqCQdlb5sIqQqXyN2K6HuKAFywsjsGyJ +abY= +-----END CERTIFICATE-----"; + + // Configure the client + let ca = "-----BEGIN CERTIFICATE----- +MIIDSzCCAjOgAwIBAgIIB42n1ZIkOakwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMwN1oYDzIxMjMw +MzA2MTYwMzA3WjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSAwNzhkYTcwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIuCq24O4P4Aep5vAVlrIQ7P8+ +uWWgcHIFYa02TmhBUB/hjo0JANCQvAtpVNuQ8NyKPlqnnq1cttePbSYVeA0rrnOs +DcfySAiyGBEY9zMjFfHJtH1wtrPcJEU8XIEY3xUlrAJE2CEuV9dVYgfEEydnvgLc +8Ug0WXSiARjqbnMW3l8jh6bYCp/UpL/gSM4mxdKrgpfyPoweGhlOWXc3RTS7cqM9 
+T25acURGOSI6/g8GF0sNE4VZmUvHggSTmsbLeXMJzxDWO+xVehRmbQx3IkG7u++b +QdRwGIJcDNn7zHlDMHtQ0Z1DBV94fZNBwCULhCBB5g20XTGw//S7Fj2FPwyhAgMB +AAGjgYYwgYMwDgYDVR0PAQH/BAQDAgKEMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggr +BgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBTWfAmQ/BUIQm/9 +/llJJs2jUMWzGzAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzANBgkq +hkiG9w0BAQsFAAOCAQEAvtcZFAELKiTuOiAeYts6zeKxc+nnHCzayDeD/BDCbxGJ +e1n+xdHjLtWGd+/Anc+fvftSYBPTFQqCi84lPiUIln5z/rUxE+ke81hNPIfw2obc +yIg87xCabQpVyEh8s+MV+7YPQ1+fH4FuSi2Fck1FejxkVqN2uOZPvOYUmSTsaVr1 +8SfRnwJNZ9UMRPM2bD4Jkvj0VcL42JM3QkOClOzYW4j/vll2cSs4kx7er27cIoo1 +Ck0v2xSPAiVjg6w65rUQeW6uB5m0T2wyj+wm0At8vzhZPlgS1fKhcmT2dzOq3+oN +R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== +-----END CERTIFICATE-----"; + + // Define the locator + let mut endpoint: EndPoint = format!("quic/localhost:{}", 13040).parse().unwrap(); + endpoint + .config_mut() + .extend( + [ + (TLS_ROOT_CA_CERTIFICATE_RAW, ca), + (TLS_SERVER_PRIVATE_KEY_RAW, key), + (TLS_SERVER_CERTIFICATE_RAW, cert), + ] + .iter() + .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + ) + .unwrap(); + + time_universal_transport(&endpoint).await; +} + +#[cfg(all(feature = "transport_vsock", target_os = "linux"))] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_vsock_only() { + let _ = env_logger::try_init(); + let endpoint: EndPoint = "vsock/VMADDR_CID_LOCAL:17000".parse().unwrap(); + time_lowlatency_transport(&endpoint).await; +} From 2da0aeb0c59a5634b1975fad1200fb92256ec733 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 5 Apr 2024 11:06:54 +0200 Subject: [PATCH 121/357] Declare message can be Push/Request/RequestContinuous/Response (#902) * Declare message can be Push/Request/RequestContinuous/Response * Address review comments * Remove F: Future flag from DeclareInterest * cargo fmt --all --- commons/zenoh-codec/src/network/declare.rs | 236 +++++++----------- commons/zenoh-codec/tests/codec.rs | 16 ++ commons/zenoh-protocol/src/network/declare.rs | 215 ++++++++-------- commons/zenoh-protocol/src/network/mod.rs | 6 +- zenoh/src/key_expr.rs | 4 +- zenoh/src/net/routing/dispatcher/face.rs | 3 +- zenoh/src/net/routing/dispatcher/resource.rs | 4 +- zenoh/src/net/routing/hat/client/pubsub.rs | 10 +- zenoh/src/net/routing/hat/client/queries.rs | 8 +- .../net/routing/hat/linkstate_peer/pubsub.rs | 14 +- .../net/routing/hat/linkstate_peer/queries.rs | 14 +- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 10 +- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 8 +- zenoh/src/net/routing/hat/router/pubsub.rs | 22 +- zenoh/src/net/routing/hat/router/queries.rs | 22 +- zenoh/src/net/routing/mod.rs | 3 +- zenoh/src/net/runtime/adminspace.rs | 8 +- zenoh/src/net/tests/tables.rs | 12 +- zenoh/src/session.rs | 19 +- 19 files changed, 283 insertions(+), 351 deletions(-) diff --git a/commons/zenoh-codec/src/network/declare.rs b/commons/zenoh-codec/src/network/declare.rs index d7a25ea0a9..173fbe5e4a 100644 --- a/commons/zenoh-codec/src/network/declare.rs +++ b/commons/zenoh-codec/src/network/declare.rs @@ -19,12 +19,16 @@ use zenoh_buffers::{ ZBuf, }; use zenoh_protocol::{ - common::{iext, imsg, ZExtZ64}, + common::{ + iext, + imsg::{self, HEADER_BITS}, + ZExtZ64, + }, core::{ExprId, ExprLen, WireExpr}, network::{ declare::{ self, common, interest, keyexpr, queryable, subscriber, token, Declare, DeclareBody, - Interest, + DeclareMode, Interest, }, id, Mapping, }, @@ -48,8 +52,7 @@ where DeclareBody::DeclareToken(r) => self.write(&mut *writer, r)?, DeclareBody::UndeclareToken(r) => 
self.write(&mut *writer, r)?, DeclareBody::DeclareInterest(r) => self.write(&mut *writer, r)?, - DeclareBody::FinalInterest(r) => self.write(&mut *writer, r)?, - DeclareBody::UndeclareInterest(r) => self.write(&mut *writer, r)?, + DeclareBody::DeclareFinal(r) => self.write(&mut *writer, r)?, } Ok(()) @@ -77,8 +80,7 @@ where D_TOKEN => DeclareBody::DeclareToken(codec.read(&mut *reader)?), U_TOKEN => DeclareBody::UndeclareToken(codec.read(&mut *reader)?), D_INTEREST => DeclareBody::DeclareInterest(codec.read(&mut *reader)?), - F_INTEREST => DeclareBody::FinalInterest(codec.read(&mut *reader)?), - U_INTEREST => DeclareBody::UndeclareInterest(codec.read(&mut *reader)?), + D_FINAL => DeclareBody::DeclareFinal(codec.read(&mut *reader)?), _ => return Err(DidntRead), }; @@ -95,7 +97,7 @@ where fn write(self, writer: &mut W, x: &Declare) -> Self::Output { let Declare { - interest_id, + mode, ext_qos, ext_tstamp, ext_nodeid, @@ -104,9 +106,13 @@ where // Header let mut header = id::DECLARE; - if x.interest_id.is_some() { - header |= declare::flag::I; - } + header |= match mode { + DeclareMode::Push => 0b00, + DeclareMode::Response(_) => 0b01, + DeclareMode::Request(_) => 0b10, + DeclareMode::RequestContinuous(_) => 0b11, + } << HEADER_BITS; + let mut n_exts = ((ext_qos != &declare::ext::QoSType::DEFAULT) as u8) + (ext_tstamp.is_some() as u8) + ((ext_nodeid != &declare::ext::NodeIdType::DEFAULT) as u8); @@ -116,8 +122,11 @@ where self.write(&mut *writer, header)?; // Body - if let Some(interest_id) = interest_id { - self.write(&mut *writer, interest_id)?; + if let DeclareMode::Request(rid) + | DeclareMode::RequestContinuous(rid) + | DeclareMode::Response(rid) = mode + { + self.write(&mut *writer, rid)?; } // Extensions @@ -166,10 +175,14 @@ where return Err(DidntRead); } - let mut interest_id = None; - if imsg::has_flag(self.header, declare::flag::I) { - interest_id = Some(self.codec.read(&mut *reader)?); - } + // Body + let mode = match (self.header >> HEADER_BITS) & 0b11 { + 0b00 => DeclareMode::Push, + 0b01 => DeclareMode::Response(self.codec.read(&mut *reader)?), + 0b10 => DeclareMode::Request(self.codec.read(&mut *reader)?), + 0b11 => DeclareMode::RequestContinuous(self.codec.read(&mut *reader)?), + _ => return Err(DidntRead), + }; // Extensions let mut ext_qos = declare::ext::QoSType::DEFAULT; @@ -206,7 +219,7 @@ where let body: DeclareBody = self.codec.read(&mut *reader)?; Ok(Declare { - interest_id, + mode, ext_qos, ext_tstamp, ext_nodeid, @@ -215,6 +228,59 @@ where } } +// Final +impl WCodec<&common::DeclareFinal, &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: &common::DeclareFinal) -> Self::Output { + let common::DeclareFinal = x; + + // Header + let header = declare::id::D_FINAL; + self.write(&mut *writer, header)?; + + Ok(()) + } +} + +impl RCodec for Zenoh080 +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let header: u8 = self.read(&mut *reader)?; + let codec = Zenoh080Header::new(header); + + codec.read(reader) + } +} + +impl RCodec for Zenoh080Header +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + if imsg::mid(self.header) != declare::id::D_FINAL { + return Err(DidntRead); + } + + // Extensions + let has_ext = imsg::has_flag(self.header, token::flag::Z); + if has_ext { + extension::skip_all(reader, "Final")?; + } + + Ok(common::DeclareFinal) + } +} + // DeclareKeyExpr impl WCodec<&keyexpr::DeclareKeyExpr, &mut W> 
for Zenoh080 where @@ -907,7 +973,7 @@ where } = x; // Header - let header = declare::id::D_INTEREST | x.flags(); + let header = declare::id::D_INTEREST; self.write(&mut *writer, header)?; // Body @@ -976,140 +1042,6 @@ where } } -// FinalInterest -impl WCodec<&interest::FinalInterest, &mut W> for Zenoh080 -where - W: Writer, -{ - type Output = Result<(), DidntWrite>; - - fn write(self, writer: &mut W, x: &interest::FinalInterest) -> Self::Output { - let interest::FinalInterest { id } = x; - - // Header - let header = declare::id::F_INTEREST; - self.write(&mut *writer, header)?; - - // Body - self.write(&mut *writer, id)?; - - Ok(()) - } -} - -impl RCodec for Zenoh080 -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - let header: u8 = self.read(&mut *reader)?; - let codec = Zenoh080Header::new(header); - - codec.read(reader) - } -} - -impl RCodec for Zenoh080Header -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - if imsg::mid(self.header) != declare::id::F_INTEREST { - return Err(DidntRead); - } - - // Body - let id: interest::InterestId = self.codec.read(&mut *reader)?; - - // Extensions - let has_ext = imsg::has_flag(self.header, token::flag::Z); - if has_ext { - extension::skip_all(reader, "FinalInterest")?; - } - - Ok(interest::FinalInterest { id }) - } -} - -// UndeclareInterest -impl WCodec<&interest::UndeclareInterest, &mut W> for Zenoh080 -where - W: Writer, -{ - type Output = Result<(), DidntWrite>; - - fn write(self, writer: &mut W, x: &interest::UndeclareInterest) -> Self::Output { - let interest::UndeclareInterest { id, ext_wire_expr } = x; - - // Header - let header = declare::id::U_INTEREST | interest::flag::Z; - self.write(&mut *writer, header)?; - - // Body - self.write(&mut *writer, id)?; - - // Extension - self.write(&mut *writer, (ext_wire_expr, false))?; - - Ok(()) - } -} - -impl RCodec for Zenoh080 -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - let header: u8 = self.read(&mut *reader)?; - let codec = Zenoh080Header::new(header); - - codec.read(reader) - } -} - -impl RCodec for Zenoh080Header -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - if imsg::mid(self.header) != declare::id::U_INTEREST { - return Err(DidntRead); - } - - // Body - let id: interest::InterestId = self.codec.read(&mut *reader)?; - - // Extensions - let mut ext_wire_expr = common::ext::WireExprType::null(); - - let mut has_ext = imsg::has_flag(self.header, interest::flag::Z); - while has_ext { - let ext: u8 = self.codec.read(&mut *reader)?; - let eodec = Zenoh080Header::new(ext); - match iext::eid(ext) { - common::ext::WireExprExt::ID => { - let (we, ext): (common::ext::WireExprType, bool) = eodec.read(&mut *reader)?; - ext_wire_expr = we; - has_ext = ext; - } - _ => { - has_ext = extension::skip(reader, "UndeclareInterest", ext)?; - } - } - } - - Ok(interest::UndeclareInterest { id, ext_wire_expr }) - } -} - // WARNING: this is a temporary extension used for undeclarations impl WCodec<(&common::ext::WireExprType, bool), &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/tests/codec.rs b/commons/zenoh-codec/tests/codec.rs index 2f0e870c4f..d28ba9a4d3 100644 --- a/commons/zenoh-codec/tests/codec.rs +++ b/commons/zenoh-codec/tests/codec.rs @@ -31,6 +31,22 @@ use zenoh_protocol::{ zenoh, zextunit, zextz64, zextzbuf, }; +#[test] +fn zbuf_test() { + let mut buffer = vec![0u8; 64]; + + let zbuf = 
ZBuf::empty(); + let mut writer = buffer.writer(); + + let codec = Zenoh080::new(); + codec.write(&mut writer, &zbuf).unwrap(); + println!("Buffer: {:?}", buffer); + + let mut reader = buffer.reader(); + let ret: ZBuf = codec.read(&mut reader).unwrap(); + assert_eq!(ret, zbuf); +} + const NUM_ITER: usize = 100; const MAX_PAYLOAD_SIZE: usize = 256; diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index 10027259c2..996e7768ee 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -18,6 +18,8 @@ use crate::{ zextz64, zextzbuf, }; use alloc::borrow::Cow; +pub use common::*; +use core::sync::atomic::AtomicU32; pub use interest::*; pub use keyexpr::*; pub use queryable::*; @@ -31,24 +33,59 @@ pub mod flag { } /// Flags: -/// - I: Interest If I==1 then the declare is in a response to an Interest with future==false -/// - X: Reserved +/// - |: Mode The mode of the the declaration* +/// -/ /// - Z: Extension If Z==1 then at least one extension is present /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ -/// |Z|X|I| DECLARE | +/// |Z|Mod| DECLARE | /// +-+-+-+---------+ -/// ~interest_id:z32~ if I==1 +/// ~ rid:z32 ~ if Mode != Push /// +---------------+ /// ~ [decl_exts] ~ if Z==1 /// +---------------+ /// ~ declaration ~ /// +---------------+ /// +/// *Mode of declaration: +/// - Mode 0b00: Push +/// - Mode 0b01: Response +/// - Mode 0b10: Request +/// - Mode 0b11: RequestContinuous + +/// The resolution of a RequestId +pub type DeclareRequestId = u32; +pub type AtomicDeclareRequestId = AtomicU32; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum DeclareMode { + Push, + Request(DeclareRequestId), + RequestContinuous(DeclareRequestId), + Response(DeclareRequestId), +} + +impl DeclareMode { + #[cfg(feature = "test")] + pub fn rand() -> Self { + use rand::Rng; + + let mut rng = rand::thread_rng(); + + match rng.gen_range(0..4) { + 0 => DeclareMode::Push, + 1 => DeclareMode::Request(rng.gen()), + 2 => DeclareMode::RequestContinuous(rng.gen()), + 3 => DeclareMode::Response(rng.gen()), + _ => unreachable!(), + } + } +} + #[derive(Debug, Clone, PartialEq, Eq)] pub struct Declare { - pub interest_id: Option, + pub mode: DeclareMode, pub ext_qos: ext::QoSType, pub ext_tstamp: Option, pub ext_nodeid: ext::NodeIdType, @@ -85,8 +122,8 @@ pub mod id { pub const U_TOKEN: u8 = 0x07; pub const D_INTEREST: u8 = 0x08; - pub const F_INTEREST: u8 = 0x09; - pub const U_INTEREST: u8 = 0x0A; + + pub const D_FINAL: u8 = 0x1A; } #[derive(Debug, Clone, PartialEq, Eq)] @@ -100,8 +137,7 @@ pub enum DeclareBody { DeclareToken(DeclareToken), UndeclareToken(UndeclareToken), DeclareInterest(DeclareInterest), - FinalInterest(FinalInterest), - UndeclareInterest(UndeclareInterest), + DeclareFinal(DeclareFinal), } impl DeclareBody { @@ -111,7 +147,7 @@ impl DeclareBody { let mut rng = rand::thread_rng(); - match rng.gen_range(0..11) { + match rng.gen_range(0..10) { 0 => DeclareBody::DeclareKeyExpr(DeclareKeyExpr::rand()), 1 => DeclareBody::UndeclareKeyExpr(UndeclareKeyExpr::rand()), 2 => DeclareBody::DeclareSubscriber(DeclareSubscriber::rand()), @@ -121,8 +157,7 @@ impl DeclareBody { 6 => DeclareBody::DeclareToken(DeclareToken::rand()), 7 => DeclareBody::UndeclareToken(UndeclareToken::rand()), 8 => DeclareBody::DeclareInterest(DeclareInterest::rand()), - 9 => DeclareBody::FinalInterest(FinalInterest::rand()), - 10 => DeclareBody::UndeclareInterest(UndeclareInterest::rand()), + 9 => 
DeclareBody::DeclareFinal(DeclareFinal::rand()), _ => unreachable!(), } } @@ -135,14 +170,14 @@ impl Declare { let mut rng = rand::thread_rng(); - let interest_id = rng.gen_bool(0.5).then_some(rng.gen::()); + let mode = DeclareMode::rand(); let ext_qos = ext::QoSType::rand(); let ext_tstamp = rng.gen_bool(0.5).then(ext::TimestampType::rand); let ext_nodeid = ext::NodeIdType::rand(); let body = DeclareBody::rand(); Self { - interest_id, + mode, ext_qos, ext_tstamp, ext_nodeid, @@ -154,6 +189,29 @@ impl Declare { pub mod common { use super::*; + /// ```text + /// Flags: + /// - X: Reserved + /// - X: Reserved + /// - Z: Extension If Z==1 then at least one extension is present + /// + /// 7 6 5 4 3 2 1 0 + /// +-+-+-+-+-+-+-+-+ + /// |Z|x|x| D_FINAL | + /// +---------------+ + /// ~ [final_exts] ~ if Z==1 + /// +---------------+ + /// ``` + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct DeclareFinal; + + impl DeclareFinal { + #[cfg(feature = "test")] + pub fn rand() -> Self { + Self + } + } + pub mod ext { use super::*; @@ -545,7 +603,7 @@ pub mod queryable { /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ - /// |Z|0_2| U_QBL | + /// |Z|X|X| U_QBL | /// +---------------+ /// ~ qbls_id:z32 ~ /// +---------------+ @@ -668,44 +726,51 @@ pub mod interest { pub type InterestId = u32; pub mod flag { - pub const C: u8 = 1 << 5; // 0x20 Current if C==1 then the interest refers to the current declarations. - pub const F: u8 = 1 << 6; // 0x40 Future if F==1 then the interest refers to the future declarations. + // pub const X: u8 = 1 << 5; // 0x20 Reserved + // pub const X: u8 = 1 << 6; // 0x40 Reserved pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow } /// # DeclareInterest message /// - /// The DECLARE INTEREST message is sent to request the transmission of existing and future - /// declarations of a given kind matching a target keyexpr. E.g., a declare interest could be sent to - /// request the transmisison of all existing subscriptions matching `a/*`. A FINAL INTEREST is used to - /// mark the end of the transmission of exisiting matching declarations. + /// The DECLARE INTEREST message is sent to request the transmission of current and/or future + /// declarations of a given kind matching a target keyexpr. E.g., a declare interest could be + /// sent to request the transmisison of all current subscriptions matching `a/*`. + /// + /// The behaviour of a DECLARE INTEREST depends on the DECLARE MODE in the DECLARE MESSAGE: + /// - Push: only future declarations + /// - Request: only current declarations + /// - RequestContinous: current and future declarations + /// - Response: invalid /// - /// E.g., the [`DeclareInterest`]/[`FinalInterest`]/[`UndeclareInterest`] message flow is the following: + /// E.g., the [`DeclareInterest`] message flow is the following: /// /// ```text /// A B /// | DECL INTEREST | - /// |------------------>| -- This is a DeclareInterest e.g. for subscriber declarations/undeclarations. + /// |------------------>| -- Sent in Declare::RequestContinuous. + /// | | This is a DeclareInterest e.g. for subscriber declarations/undeclarations. 
/// | | /// | DECL SUBSCRIBER | - /// |<------------------| + /// |<------------------| -- Sent in Declare::Response /// | DECL SUBSCRIBER | - /// |<------------------| + /// |<------------------| -- Sent in Declare::Response /// | DECL SUBSCRIBER | - /// |<------------------| + /// |<------------------| -- Sent in Declare::Response /// | | - /// | FINAL INTEREST | - /// |<------------------| -- The FinalInterest signals that all known subscribers have been transmitted. + /// | FINAL | + /// |<------------------| -- Sent in Declare::Response /// | | /// | DECL SUBSCRIBER | - /// |<------------------| -- This is a new subscriber declaration. + /// |<------------------| -- Sent in Declare::Push. This is a new subscriber declaration. /// | UNDECL SUBSCRIBER | - /// |<------------------| -- This is a new subscriber undeclaration. + /// |<------------------| -- Sent in Declare::Push. This is a new subscriber undeclaration. /// | | /// | ... | /// | | - /// | UNDECL INTEREST | - /// |------------------>| -- This is an UndeclareInterest to stop receiving subscriber declarations/undeclarations. + /// | FINAL | + /// |------------------>| -- Sent in Declare::RequestContinuous. + /// | | This stops the transmission of subscriber declarations/undeclarations. /// | | /// ``` /// @@ -713,15 +778,13 @@ pub mod interest { /// /// ```text /// Flags: - /// - C: Current if C==1 then the interest refers to the current declarations. - /// - F: Future if F==1 then the interest refers to the future declarations. Note that if F==0 then: - /// - Declarations SHOULD NOT be sent after the FinalInterest; - /// - UndeclareInterest SHOULD NOT be sent after the FinalInterest. + /// - X: Reserved + /// - X: Reserved /// - Z: Extension If Z==1 then at least one extension is present /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ - /// |Z|F|C| D_INT | + /// |Z|F|X| D_INT | /// +---------------+ /// ~ intst_id:z32 ~ /// +---------------+ @@ -752,17 +815,6 @@ pub mod interest { } impl DeclareInterest { - pub fn flags(&self) -> u8 { - let mut interest = self.interest; - if self.interest.current() { - interest += Interest::CURRENT; - } - if self.interest.future() { - interest += Interest::FUTURE; - } - interest.flags - } - pub fn options(&self) -> u8 { let mut interest = self.interest; if let Some(we) = self.wire_expr.as_ref() { @@ -801,9 +853,6 @@ pub mod interest { } impl Interest { - // Header - pub const CURRENT: Interest = Interest::flags(interest::flag::C); - pub const FUTURE: Interest = Interest::flags(interest::flag::F); // Flags pub const KEYEXPRS: Interest = Interest::options(1); pub const SUBSCRIBERS: Interest = Interest::options(1 << 1); @@ -820,10 +869,6 @@ pub mod interest { | Interest::TOKENS.options, ); - const fn flags(flags: u8) -> Self { - Self { flags, options: 0 } - } - const fn options(options: u8) -> Self { Self { flags: 0, options } } @@ -835,14 +880,6 @@ pub mod interest { } } - pub const fn current(&self) -> bool { - imsg::has_flag(self.flags, Self::CURRENT.flags) - } - - pub const fn future(&self) -> bool { - imsg::has_flag(self.flags, Self::FUTURE.flags) - } - pub const fn keyexprs(&self) -> bool { imsg::has_flag(self.options, Self::KEYEXPRS.options) } @@ -881,12 +918,6 @@ pub mod interest { let mut rng = rand::thread_rng(); let mut s = Self::empty(); - if rng.gen_bool(0.5) { - s += Interest::CURRENT; - } - if rng.gen_bool(0.5) { - s += Interest::FUTURE; - } if rng.gen_bool(0.5) { s += Interest::KEYEXPRS; } @@ -905,9 +936,7 @@ pub mod interest { impl PartialEq for Interest { fn eq(&self, other: 
&Self) -> bool { - self.current() == other.current() - && self.future() == other.future() - && self.keyexprs() == other.keyexprs() + self.keyexprs() == other.keyexprs() && self.subscribers() == other.subscribers() && self.queryables() == other.queryables() && self.tokens() == other.tokens() @@ -918,16 +947,6 @@ pub mod interest { impl Debug for Interest { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Interest {{ ")?; - if self.current() { - write!(f, "C:Y, ")?; - } else { - write!(f, "C:N, ")?; - } - if self.future() { - write!(f, "F:Y, ")?; - } else { - write!(f, "F:N, ")?; - } if self.keyexprs() { write!(f, "K:Y, ")?; } else { @@ -1003,38 +1022,6 @@ pub mod interest { } } - /// ```text - /// Flags: - /// - X: Reserved - /// - X: Reserved - /// - Z: Extension If Z==1 then at least one extension is present - /// - /// 7 6 5 4 3 2 1 0 - /// +-+-+-+-+-+-+-+-+ - /// |Z|X|X| F_INT | - /// +---------------+ - /// ~ intst_id:z32 ~ - /// +---------------+ - /// ~ [decl_exts] ~ if Z==1 - /// +---------------+ - /// ``` - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct FinalInterest { - pub id: InterestId, - } - - impl FinalInterest { - #[cfg(feature = "test")] - pub fn rand() -> Self { - use rand::Rng; - let mut rng = rand::thread_rng(); - - let id: InterestId = rng.gen(); - - Self { id } - } - } - /// ```text /// Flags: /// - X: Reserved diff --git a/commons/zenoh-protocol/src/network/mod.rs b/commons/zenoh-protocol/src/network/mod.rs index 0e198ddf0f..cbf9894aef 100644 --- a/commons/zenoh-protocol/src/network/mod.rs +++ b/commons/zenoh-protocol/src/network/mod.rs @@ -20,9 +20,9 @@ pub mod response; use core::fmt; pub use declare::{ - Declare, DeclareBody, DeclareInterest, DeclareKeyExpr, DeclareQueryable, DeclareSubscriber, - DeclareToken, UndeclareInterest, UndeclareKeyExpr, UndeclareQueryable, UndeclareSubscriber, - UndeclareToken, + Declare, DeclareBody, DeclareFinal, DeclareInterest, DeclareKeyExpr, DeclareMode, + DeclareQueryable, DeclareSubscriber, DeclareToken, UndeclareInterest, UndeclareKeyExpr, + UndeclareQueryable, UndeclareSubscriber, UndeclareToken, }; pub use oam::Oam; pub use push::Push; diff --git a/zenoh/src/key_expr.rs b/zenoh/src/key_expr.rs index aaa1d13724..17aa0425b6 100644 --- a/zenoh/src/key_expr.rs +++ b/zenoh/src/key_expr.rs @@ -53,7 +53,7 @@ pub use zenoh_keyexpr::*; pub use zenoh_macros::{kedefine, keformat, kewrite}; use zenoh_protocol::{ core::{key_expr::canon::Canonizable, ExprId, WireExpr}, - network::{declare, DeclareBody, Mapping, UndeclareKeyExpr}, + network::{declare, DeclareBody, DeclareMode, Mapping, UndeclareKeyExpr}, }; use zenoh_result::ZResult; @@ -664,7 +664,7 @@ impl SyncResolve for KeyExprUndeclaration<'_> { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(zenoh_protocol::network::Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index cb565053c9..3531dd2d88 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -211,8 +211,7 @@ impl Primitives for Face { zenoh_protocol::network::DeclareBody::DeclareToken(_m) => todo!(), zenoh_protocol::network::DeclareBody::UndeclareToken(_m) => todo!(), zenoh_protocol::network::DeclareBody::DeclareInterest(_m) => todo!(), - zenoh_protocol::network::DeclareBody::FinalInterest(_m) => 
todo!(), - zenoh_protocol::network::DeclareBody::UndeclareInterest(_m) => todo!(), + zenoh_protocol::network::DeclareBody::DeclareFinal(_m) => todo!(), } drop(ctrl_lock); } diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 194b97fca8..941b37f916 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -27,7 +27,7 @@ use zenoh_protocol::{ network::{ declare::{ ext, queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, Declare, - DeclareBody, DeclareKeyExpr, + DeclareBody, DeclareKeyExpr, DeclareMode, }, Mapping, }, @@ -452,7 +452,7 @@ impl Resource { .insert(expr_id, nonwild_prefix.clone()); face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index e85bb77bf9..6c689d3336 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -30,7 +30,7 @@ use zenoh_protocol::{ core::{Reliability, WhatAmI}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, UndeclareSubscriber, + DeclareMode, DeclareSubscriber, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; @@ -53,7 +53,7 @@ fn propagate_simple_subscription_to( let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -137,7 +137,7 @@ fn declare_client_subscription( .primitives .send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -171,7 +171,7 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -206,7 +206,7 @@ pub(super) fn undeclare_client_subscription( if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 5c0bc5349b..28e1d75460 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -33,7 +33,7 @@ use zenoh_protocol::{ core::{WhatAmI, WireExpr}, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareQueryable, UndeclareQueryable, + DeclareMode, DeclareQueryable, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; @@ -93,7 +93,7 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, 
ext_nodeid: ext::NodeIdType::DEFAULT, @@ -165,7 +165,7 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -418,7 +418,7 @@ pub(super) fn undeclare_client_subscription( if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -460,7 +460,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index 150c12a632..356793e3a3 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -36,7 +36,7 @@ use zenoh_protocol::{ core::{WhatAmI, WireExpr, ZenohId}, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareQueryable, UndeclareQueryable, + DeclareMode, DeclareQueryable, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; @@ -126,7 +126,7 @@ fn send_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { @@ -170,7 +170,7 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -339,7 +339,7 @@ fn send_forget_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { @@ -365,7 +365,7 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index b495248788..5ac0b22846 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -30,7 +30,7 @@ use zenoh_protocol::{ core::{Reliability, WhatAmI}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, UndeclareSubscriber, + DeclareMode, DeclareSubscriber, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; @@ -53,7 +53,7 @@ fn propagate_simple_subscription_to( let key_expr = Resource::decl_key(res, dst_face); 
dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -137,7 +137,7 @@ fn declare_client_subscription( .primitives .send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -171,7 +171,7 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -206,7 +206,7 @@ pub(super) fn undeclare_client_subscription( if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index 72c32b9217..c2d62c7658 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -33,7 +33,7 @@ use zenoh_protocol::{ core::{WhatAmI, WireExpr}, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareQueryable, UndeclareQueryable, + DeclareMode, DeclareQueryable, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; @@ -93,7 +93,7 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -165,7 +165,7 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -412,7 +412,7 @@ fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc< if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -564,7 +564,7 @@ pub(super) fn undeclare_client_subscription( if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -606,7 +606,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -635,7 +635,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(sub, face); 
face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -774,7 +774,7 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: if forget { dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -800,7 +800,7 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: }; dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 99e787beb5..e647cf2dc7 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -36,7 +36,7 @@ use zenoh_protocol::{ core::{WhatAmI, WireExpr, ZenohId}, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareQueryable, UndeclareQueryable, + DeclareMode, DeclareQueryable, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; @@ -194,7 +194,7 @@ fn send_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { @@ -248,7 +248,7 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -473,7 +473,7 @@ fn send_forget_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { @@ -499,7 +499,7 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -775,7 +775,7 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -874,7 +874,7 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links if forget { dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -900,7 +900,7 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, 
ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/mod.rs b/zenoh/src/net/routing/mod.rs index 0ddf12b82f..77f51c16b3 100644 --- a/zenoh/src/net/routing/mod.rs +++ b/zenoh/src/net/routing/mod.rs @@ -117,8 +117,7 @@ impl RoutingContext { DeclareBody::DeclareToken(m) => Some(&m.wire_expr), DeclareBody::UndeclareToken(m) => Some(&m.ext_wire_expr.wire_expr), DeclareBody::DeclareInterest(m) => m.wire_expr.as_ref(), - DeclareBody::FinalInterest(_) => None, - DeclareBody::UndeclareInterest(m) => Some(&m.ext_wire_expr.wire_expr), + DeclareBody::DeclareFinal(_) => None, }, NetworkBody::OAM(_) => None, } diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index d460ee3f1c..a5739d830c 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -39,8 +39,8 @@ use zenoh_protocol::{ }, network::{ declare::{queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo}, - ext, Declare, DeclareBody, DeclareQueryable, DeclareSubscriber, Push, Request, Response, - ResponseFinal, + ext, Declare, DeclareBody, DeclareMode, DeclareQueryable, DeclareSubscriber, Push, Request, + Response, ResponseFinal, }, zenoh::{PushBody, RequestBody}, }; @@ -276,7 +276,7 @@ impl AdminSpace { zlock!(admin.primitives).replace(primitives.clone()); primitives.send_declare(Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, @@ -289,7 +289,7 @@ impl AdminSpace { }); primitives.send_declare(Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index 4067f2ad8f..55ff9f0a4d 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -26,7 +26,7 @@ use zenoh_protocol::core::{ key_expr::keyexpr, ExprId, Reliability, WhatAmI, WireExpr, ZenohId, EMPTY_EXPR_ID, }; use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; -use zenoh_protocol::network::{ext, Declare, DeclareBody, DeclareKeyExpr}; +use zenoh_protocol::network::{ext, Declare, DeclareBody, DeclareKeyExpr, DeclareMode}; use zenoh_protocol::zenoh::{PushBody, Put}; #[test] @@ -579,7 +579,7 @@ fn client_test() { Primitives::send_declare( primitives0.as_ref(), Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -607,7 +607,7 @@ fn client_test() { Primitives::send_declare( primitives0.as_ref(), Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -629,7 +629,7 @@ fn client_test() { Primitives::send_declare( primitives1.as_ref(), Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -657,7 +657,7 @@ fn client_test() { Primitives::send_declare( primitives1.as_ref(), Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -679,7 +679,7 @@ fn client_test() { Primitives::send_declare( primitives2.as_ref(), Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index addb757807..9bc6c9c331 100644 --- a/zenoh/src/session.rs 
+++ b/zenoh/src/session.rs @@ -71,7 +71,7 @@ use zenoh_protocol::{ network::{ declare::{ self, common::ext::WireExprType, queryable::ext::QueryableInfoType, - subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, + subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, DeclareMode, DeclareQueryable, DeclareSubscriber, UndeclareQueryable, UndeclareSubscriber, }, ext, @@ -872,7 +872,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1085,7 +1085,7 @@ impl Session { // }; primitives.send_declare(Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1142,7 +1142,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1194,7 +1194,7 @@ impl Session { distance: 0, }; primitives.send_declare(Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1216,7 +1216,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1252,7 +1252,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1277,7 +1277,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - interest_id: None, + mode: DeclareMode::Push, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -2047,8 +2047,7 @@ impl Primitives for Session { DeclareBody::DeclareToken(_) => todo!(), DeclareBody::UndeclareToken(_) => todo!(), DeclareBody::DeclareInterest(_) => todo!(), - DeclareBody::FinalInterest(_) => todo!(), - DeclareBody::UndeclareInterest(_) => todo!(), + DeclareBody::DeclareFinal(_) => todo!(), } } From 8f8eb2589a57c1074622c125f5111c4afde9a1e7 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 5 Apr 2024 11:19:56 +0200 Subject: [PATCH 122/357] typedefs for complex builder types (#890) --- zenoh/src/publication.rs | 21 +++++++++++++++------ zenoh/src/queryable.rs | 8 ++++++-- zenoh/src/session.rs | 4 ++-- 3 files changed, 23 insertions(+), 10 deletions(-) diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 64fa5b49c6..c176ad32e0 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -46,7 +46,8 @@ pub struct PublicationBuilderPut { #[derive(Debug, Clone)] pub struct PublicationBuilderDelete; -/// A builder for initializing a [`put`](crate::Session::put) and [`delete`](crate::Session::delete) operations +/// A builder for initializing [`Session::put`](crate::Session::put), [`Session::delete`](crate::Session::delete), +/// 
[`Publisher::put`](crate::Publisher::put), and [`Publisher::delete`](crate::Publisher::delete) operations. /// /// # Examples /// ``` @@ -78,6 +79,17 @@ pub struct PublicationBuilder { pub(crate) attachment: Option, } +pub type SessionPutBuilder<'a, 'b> = + PublicationBuilder, PublicationBuilderPut>; + +pub type SessionDeleteBuilder<'a, 'b> = + PublicationBuilder, PublicationBuilderDelete>; + +pub type PublisherPutBuilder<'a> = PublicationBuilder<&'a Publisher<'a>, PublicationBuilderPut>; + +pub type PublisherDeleteBuilder<'a> = + PublicationBuilder<&'a Publisher<'a>, PublicationBuilderDelete>; + impl QoSBuilderTrait for PublicationBuilder, T> { #[inline] fn congestion_control(self, congestion_control: CongestionControl) -> Self { @@ -405,10 +417,7 @@ impl<'a> Publisher<'a> { /// # } /// ``` #[inline] - pub fn put( - &self, - payload: IntoPayload, - ) -> PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> + pub fn put(&self, payload: IntoPayload) -> PublisherPutBuilder<'_> where IntoPayload: Into, { @@ -439,7 +448,7 @@ impl<'a> Publisher<'a> { /// publisher.delete().res().await.unwrap(); /// # } /// ``` - pub fn delete(&self) -> PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { + pub fn delete(&self) -> PublisherDeleteBuilder<'_> { PublicationBuilder { publisher: self, kind: PublicationBuilderDelete, diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 0696fcbe33..c2fd67fcf4 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -129,7 +129,7 @@ impl Query { &self, key_expr: TryIntoKeyExpr, payload: IntoPayload, - ) -> ReplyBuilder<'_, 'b, ReplyBuilderPut> + ) -> ReplyPutBuilder<'_, 'b> where TryIntoKeyExpr: TryInto>, >>::Error: Into, @@ -171,7 +171,7 @@ impl Query { pub fn reply_del<'b, TryIntoKeyExpr>( &self, key_expr: TryIntoKeyExpr, - ) -> ReplyBuilder<'_, 'b, ReplyBuilderDelete> + ) -> ReplyDeleteBuilder<'_, 'b> where TryIntoKeyExpr: TryInto>, >>::Error: Into, @@ -274,6 +274,10 @@ pub struct ReplyBuilder<'a, 'b, T> { attachment: Option, } +pub type ReplyPutBuilder<'a, 'b> = ReplyBuilder<'a, 'b, ReplyBuilderPut>; + +pub type ReplyDeleteBuilder<'a, 'b> = ReplyBuilder<'a, 'b, ReplyBuilderDelete>; + impl TimestampBuilderTrait for ReplyBuilder<'_, '_, T> { fn timestamp>>(self, timestamp: U) -> Self { Self { diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index c44cb4f817..d9427f270c 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -705,7 +705,7 @@ impl Session { &'a self, key_expr: TryIntoKeyExpr, payload: IntoPayload, - ) -> PublicationBuilder, PublicationBuilderPut> + ) -> SessionPutBuilder<'a, 'b> where TryIntoKeyExpr: TryInto>, >>::Error: Into, @@ -745,7 +745,7 @@ impl Session { pub fn delete<'a, 'b: 'a, TryIntoKeyExpr>( &'a self, key_expr: TryIntoKeyExpr, - ) -> PublicationBuilder, PublicationBuilderDelete> + ) -> SessionDeleteBuilder<'a, 'b> where TryIntoKeyExpr: TryInto>, >>::Error: Into, From aeb15c311c421980f939de63b3f1ee1ffc67ecd7 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 5 Apr 2024 12:16:31 +0200 Subject: [PATCH 123/357] runtime to api --- zenoh/src/lib.rs | 7 +++++-- zenoh/src/net/routing/hat/client/mod.rs | 2 +- zenoh/src/net/routing/hat/linkstate_peer/mod.rs | 2 +- zenoh/src/net/routing/hat/mod.rs | 2 +- zenoh/src/net/routing/hat/p2p_peer/mod.rs | 2 +- zenoh/src/net/routing/hat/router/mod.rs | 2 +- zenoh/src/plugins/sealed.rs | 2 +- 7 files changed, 11 insertions(+), 8 deletions(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 3a5c358ae2..3e4a0ddda9 100644 --- a/zenoh/src/lib.rs +++ 
b/zenoh/src/lib.rs @@ -227,9 +227,12 @@ pub mod time { pub use zenoh_protocol::core::{Timestamp, TimestampId, NTP64}; } +pub mod runtime { + pub use crate::net::runtime::{AdminSpace, Runtime}; +} + mod api; -pub(crate) mod net; -pub use net::runtime; +mod net; #[deprecated = "This module is now a separate crate. Use the crate directly for shorter compile-times"] pub use zenoh_config as config; #[cfg(feature = "unstable")] diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index 8b7031152a..6ca0af1e17 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -22,7 +22,7 @@ use crate::{ dispatcher::face::Face, router::{compute_data_routes, compute_query_routes, RoutesIndexes}, }, - runtime::Runtime, + net::runtime::Runtime, }; use self::{ diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index 1b8ea8f7d4..14f0e9f57e 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -30,6 +30,7 @@ use super::{ HatBaseTrait, HatTrait, }; use crate::{ + net::runtime::Runtime, net::{ codec::Zenoh080Routing, protocol::linkstate::LinkStateList, @@ -39,7 +40,6 @@ use crate::{ router::{compute_data_routes, compute_query_routes, RoutesIndexes}, }, }, - runtime::Runtime, }; use std::{ any::Any, diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index 70e94ac176..3d1ae0f632 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -24,7 +24,7 @@ use super::{ }, router::RoutesIndexes, }; -use crate::runtime::Runtime; +use crate::net::runtime::Runtime; use std::{any::Any, sync::Arc}; use zenoh_buffers::ZBuf; use zenoh_config::{unwrap_or_default, Config, WhatAmI}; diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index 1d87c2eb23..929247f3a9 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -18,6 +18,7 @@ //! //! [Click here for Zenoh's documentation](../zenoh/index.html) use crate::{ + net::runtime::Runtime, net::{ codec::Zenoh080Routing, protocol::linkstate::LinkStateList, @@ -26,7 +27,6 @@ use crate::{ router::{compute_data_routes, compute_query_routes, RoutesIndexes}, }, }, - runtime::Runtime, }; use self::{ diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 52f067037e..cf59a65ea8 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -34,6 +34,7 @@ use super::{ HatBaseTrait, HatTrait, }; use crate::{ + net::runtime::Runtime, net::{ codec::Zenoh080Routing, protocol::linkstate::LinkStateList, @@ -43,7 +44,6 @@ use crate::{ router::{compute_data_routes, compute_query_routes, RoutesIndexes}, }, }, - runtime::Runtime, }; use std::{ any::Any, diff --git a/zenoh/src/plugins/sealed.rs b/zenoh/src/plugins/sealed.rs index cc11fc213d..3684324cf2 100644 --- a/zenoh/src/plugins/sealed.rs +++ b/zenoh/src/plugins/sealed.rs @@ -14,7 +14,7 @@ //! `zenohd`'s plugin system. For more details, consult the [detailed documentation](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Plugins/Zenoh%20Plugins.md). 
-use crate::{prelude::Selector, runtime::Runtime}; +use crate::{net::runtime::Runtime, prelude::Selector}; use zenoh_core::zconfigurable; use zenoh_plugin_trait::{ From 5e4b7d8ac5320ccea2fda66b3b282e42f204b370 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 5 Apr 2024 12:35:03 +0200 Subject: [PATCH 124/357] config in api --- plugins/zenoh-plugin-storage-manager/tests/operations.rs | 1 + plugins/zenoh-plugin-storage-manager/tests/wildcard.rs | 1 + zenoh/src/api/session.rs | 4 ++-- zenoh/src/lib.rs | 9 +++++++-- zenoh/src/net/runtime/adminspace.rs | 4 ++-- zenoh/src/net/runtime/mod.rs | 2 +- zenoh/src/prelude.rs | 2 +- zenoh/tests/connection_retry.rs | 2 +- zenoh/tests/interceptors.rs | 1 + 9 files changed, 17 insertions(+), 9 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index 6b64bbd742..d3c6207496 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -20,6 +20,7 @@ use std::str::FromStr; use std::thread::sleep; use async_std::task; +use zenoh::config::ValidatedMap; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::query::Reply; diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index 864ec5b79e..1b40a83cd5 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -21,6 +21,7 @@ use std::thread::sleep; // use std::collections::HashMap; use async_std::task; +use zenoh::config::ValidatedMap; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::query::Reply; diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 83a57ce260..5793794815 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -41,8 +41,6 @@ use crate::api::selector::TIME_RANGE_KEY; use crate::api::subscriber::SubscriberBuilder; use crate::api::subscriber::SubscriberState; use crate::api::value::Value; -use crate::config::Config; -use crate::config::Notifier; use crate::net::primitives::Primitives; use crate::net::routing::dispatcher::face::Face; use crate::net::runtime::Runtime; @@ -71,6 +69,8 @@ use uhlc::HLC; use zenoh_buffers::ZBuf; use zenoh_collections::SingleOrVec; use zenoh_config::unwrap_or_default; +use zenoh_config::Config; +use zenoh_config::Notifier; use zenoh_core::Resolvable; use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; use zenoh_protocol::core::Reliability; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 3e4a0ddda9..77cc120c5c 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -231,10 +231,15 @@ pub mod runtime { pub use crate::net::runtime::{AdminSpace, Runtime}; } +pub mod config { + pub use zenoh_config::{ + client, default, peer, Config, ModeDependentValue, PermissionsConf, PluginLoad, + ValidatedMap, + }; +} + mod api; mod net; -#[deprecated = "This module is now a separate crate. 
Use the crate directly for shorter compile-times"] -pub use zenoh_config as config; #[cfg(feature = "unstable")] pub mod plugins; pub mod prelude; diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 708d2bb349..97e77246bb 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -70,7 +70,7 @@ pub struct AdminSpace { #[derive(Debug, Clone)] enum PluginDiff { Delete(String), - Start(crate::config::PluginLoad), + Start(zenoh_config::PluginLoad), } impl ConfigValidator for AdminSpace { @@ -93,7 +93,7 @@ impl ConfigValidator for AdminSpace { impl AdminSpace { fn start_plugin( plugin_mgr: &mut plugins::PluginsManager, - config: &crate::config::PluginLoad, + config: &zenoh_config::PluginLoad, start_args: &Runtime, ) -> ZResult<()> { let name = &config.name; diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 1d81811c76..98f781720b 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -23,7 +23,6 @@ pub mod orchestrator; use super::primitives::DeMux; use super::routing; use super::routing::router::Router; -use crate::config::{unwrap_or_default, Config, ModeDependent, Notifier}; use crate::GIT_VERSION; pub use adminspace::AdminSpace; use futures::stream::StreamExt; @@ -34,6 +33,7 @@ use std::sync::Arc; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use uhlc::{HLCBuilder, HLC}; +use zenoh_config::{unwrap_or_default, Config, ModeDependent, Notifier}; use zenoh_link::{EndPoint, Link}; use zenoh_plugin_trait::{PluginStartArgs, StructVersion}; use zenoh_protocol::core::{Locator, WhatAmI, ZenohId}; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 317cad3a68..2443102d14 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -36,7 +36,7 @@ pub(crate) mod common { #[zenoh_macros::unstable] pub use zenoh_protocol::core::{EntityGlobalId, EntityId}; - pub use crate::config::{self, Config, ValidatedMap}; + pub use crate::config::{self, Config}; pub use crate::handlers::IntoHandler; pub use crate::selector::{Parameter, Parameters, Selector}; pub use crate::session::{Session, SessionDeclarations}; diff --git a/zenoh/tests/connection_retry.rs b/zenoh/tests/connection_retry.rs index fcb071b489..0a3ed01ce7 100644 --- a/zenoh/tests/connection_retry.rs +++ b/zenoh/tests/connection_retry.rs @@ -1,4 +1,4 @@ -use config::ConnectionRetryConf; +use zenoh_config::{ConnectionRetryConf, ValidatedMap}; use zenoh::prelude::sync::*; diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index 1ee93e4949..a6eff19ec9 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::sync::{Arc, Mutex}; +use zenoh_config::ValidatedMap; use zenoh_core::zlock; struct IntervalCounter { From bf7e0f10beb9d2664f69c147743c3507fa9e2845 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 5 Apr 2024 12:51:03 +0200 Subject: [PATCH 125/357] plugins into api --- zenoh/src/{plugins/sealed.rs => api/plugins.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename zenoh/src/{plugins/sealed.rs => api/plugins.rs} (100%) diff --git a/zenoh/src/plugins/sealed.rs b/zenoh/src/api/plugins.rs similarity index 100% rename from zenoh/src/plugins/sealed.rs rename to zenoh/src/api/plugins.rs From b4552b2c146d0584469ba99304d1dec77e4e235f Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 5 Apr 2024 12:59:51 +0200 Subject: [PATCH 126/357] plugins, buffers to api --- .../src/replica/storage.rs | 
2 +- zenoh/src/api.rs | 2 ++ zenoh/src/lib.rs | 25 +++++++++++++------ zenoh/src/net/runtime/adminspace.rs | 2 +- zenoh/src/plugins/mod.rs | 23 ----------------- 5 files changed, 21 insertions(+), 33 deletions(-) delete mode 100644 zenoh/src/plugins/mod.rs diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 1abe311b65..646aebc837 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -21,7 +21,7 @@ use futures::select; use std::collections::{HashMap, HashSet}; use std::str::{self, FromStr}; use std::time::{SystemTime, UNIX_EPOCH}; -use zenoh::buffers::buffer::SplitBuffer; +use zenoh::buffers::SplitBuffer; use zenoh::buffers::ZBuf; use zenoh::prelude::r#async::*; use zenoh::query::{ConsolidationMode, QueryTarget}; diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index 1af7da37c5..44496822ea 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -20,6 +20,8 @@ pub(crate) mod info; pub(crate) mod key_expr; pub(crate) mod liveliness; pub(crate) mod payload; +#[cfg(feature = "unstable")] +pub(crate) mod plugins; pub(crate) mod publication; pub(crate) mod query; pub(crate) mod queryable; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 77cc120c5c..60f5e61965 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -79,6 +79,9 @@ extern crate zenoh_core; #[macro_use] extern crate zenoh_result; +mod api; +mod net; + pub(crate) type Id = u32; use git_version::git_version; @@ -117,6 +120,13 @@ pub const FEATURES: &str = concat_enabled_features!( pub use crate::api::session::open; +/// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate +/// reading and writing data. +pub mod buffers { + pub use zenoh_buffers::buffer::SplitBuffer; + pub use zenoh_buffers::{ZBuf, ZSlice}; +} + pub mod key_expr { pub use crate::api::key_expr::kedefine; pub use crate::api::key_expr::keformat; @@ -238,14 +248,13 @@ pub mod config { }; } -mod api; -mod net; -#[cfg(feature = "unstable")] -pub mod plugins; +pub mod plugins { + pub use crate::api::plugins::PluginsManager; + pub use crate::api::plugins::Response; + pub use crate::api::plugins::RunningPlugin; + pub use crate::api::plugins::{RunningPluginTrait, ZenohPlugin}; +} + pub mod prelude; #[cfg(feature = "shared-memory")] pub use zenoh_shm as shm; - -/// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate -/// reading and writing data. 
-pub use zenoh_buffers as buffers; diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 97e77246bb..fa5c5ef1bf 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -14,13 +14,13 @@ use super::routing::dispatcher::face::Face; use super::Runtime; use crate::api::builders::sample::ValueBuilderTrait; use crate::api::key_expr::KeyExpr; +use crate::api::plugins; use crate::api::queryable::Query; use crate::api::queryable::QueryInner; use crate::api::value::Value; use crate::encoding::Encoding; use crate::net::primitives::Primitives; use crate::payload::Payload; -use crate::plugins::sealed::{self as plugins}; use crate::prelude::sync::SyncResolve; use log::{error, trace}; use serde_json::json; diff --git a/zenoh/src/plugins/mod.rs b/zenoh/src/plugins/mod.rs deleted file mode 100644 index d72139cc29..0000000000 --- a/zenoh/src/plugins/mod.rs +++ /dev/null @@ -1,23 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// - -//! ⚠️ WARNING ⚠️ -//! -//! This module is intended for Zenoh's internal use. -//! -//! [Click here for Zenoh's documentation](../../zenoh/index.html) -pub(crate) mod sealed; - -#[zenoh_macros::unstable] -pub use sealed::*; From be9c672c404602a506ed0557cfdd4bfb875b7a65 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 5 Apr 2024 13:14:03 +0200 Subject: [PATCH 127/357] shm to api --- zenoh/src/lib.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 60f5e61965..3bb919c933 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -255,6 +255,9 @@ pub mod plugins { pub use crate::api::plugins::{RunningPluginTrait, ZenohPlugin}; } -pub mod prelude; #[cfg(feature = "shared-memory")] -pub use zenoh_shm as shm; +pub mod shm { + pub use zenoh_shm::SharedMemoryManager; +} + +pub mod prelude; From 71a9423738076d07ff6a83a043cee250cb06350d Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 5 Apr 2024 14:19:38 +0200 Subject: [PATCH 128/357] Declare message can be Push/Request/RequestContinuous/Response 2 (#906) * Declare message can be Push/Request/RequestContinuous/Response * Address review comments * Remove F: Future flag from DeclareInterest * cargo fmt --all * Remove unused Interest flags field * Update doc * Remove unneeded interest_id field * Update commons/zenoh-protocol/src/network/declare.rs --------- Co-authored-by: Luca Cominardi --- commons/zenoh-codec/src/network/declare.rs | 6 +- commons/zenoh-protocol/src/network/declare.rs | 58 +++++++++++-------- 2 files changed, 34 insertions(+), 30 deletions(-) diff --git a/commons/zenoh-codec/src/network/declare.rs b/commons/zenoh-codec/src/network/declare.rs index 173fbe5e4a..6e9dad12ce 100644 --- a/commons/zenoh-codec/src/network/declare.rs +++ b/commons/zenoh-codec/src/network/declare.rs @@ -967,7 +967,6 @@ where fn write(self, writer: &mut W, x: &interest::DeclareInterest) -> Self::Output { let interest::DeclareInterest { - id, interest: _, wire_expr, } = x; @@ -977,7 +976,6 @@ where self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, id)?; self.write(&mut *writer, 
x.options())?; if let Some(we) = wire_expr.as_ref() { self.write(&mut *writer, we)?; @@ -1012,9 +1010,8 @@ where } // Body - let id: interest::InterestId = self.codec.read(&mut *reader)?; let options: u8 = self.codec.read(&mut *reader)?; - let interest = Interest::from((imsg::flags(self.header), options)); + let interest = Interest::from(options); let mut wire_expr = None; if interest.restricted() { @@ -1035,7 +1032,6 @@ where } Ok(interest::DeclareInterest { - id, interest, wire_expr, }) diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index 996e7768ee..6cd2b2200f 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -733,23 +733,23 @@ pub mod interest { /// # DeclareInterest message /// - /// The DECLARE INTEREST message is sent to request the transmission of current and/or future + /// The DECLARE INTEREST message is sent to request the transmission of current and optionally future /// declarations of a given kind matching a target keyexpr. E.g., a declare interest could be /// sent to request the transmisison of all current subscriptions matching `a/*`. /// /// The behaviour of a DECLARE INTEREST depends on the DECLARE MODE in the DECLARE MESSAGE: - /// - Push: only future declarations + /// - Push: invalid /// - Request: only current declarations /// - RequestContinous: current and future declarations /// - Response: invalid /// - /// E.g., the [`DeclareInterest`] message flow is the following: + /// E.g., the [`DeclareInterest`] message flow is the following for a Request: /// /// ```text /// A B /// | DECL INTEREST | - /// |------------------>| -- Sent in Declare::RequestContinuous. - /// | | This is a DeclareInterest e.g. for subscriber declarations/undeclarations. + /// |------------------>| -- Sent in Declare::Request. + /// | | This is a DeclareInterest e.g. for subscriber declarations. /// | | /// | DECL SUBSCRIBER | /// |<------------------| -- Sent in Declare::Response @@ -760,6 +760,26 @@ pub mod interest { /// | | /// | FINAL | /// |<------------------| -- Sent in Declare::Response + /// ``` + /// + /// + /// And the [`DeclareInterest`] message flow is the following for a RequestContinuous: + /// + /// ```text + /// A B + /// | DECL INTEREST | + /// |------------------>| -- Sent in Declare::RequestContinuous. + /// | | This is a DeclareInterest e.g. for subscriber declarations/undeclarations. + /// | | + /// | DECL SUBSCRIBER | + /// |<------------------| -- Sent in Declare::Push + /// | DECL SUBSCRIBER | + /// |<------------------| -- Sent in Declare::Push + /// | DECL SUBSCRIBER | + /// |<------------------| -- Sent in Declare::Push + /// | | + /// | FINAL | + /// |<------------------| -- Sent in Declare::Response /// | | /// | DECL SUBSCRIBER | /// |<------------------| -- Sent in Declare::Push. This is a new subscriber declaration. 
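For illustration, here is a minimal standalone sketch of the option-set arithmetic that the `Interest` Add/Sub impls below boil down to: a single options byte where `+` sets the requested declaration kinds and `-` clears them. The type and constant names are illustrative only, not the zenoh-protocol API.

```rust
// Toy stand-in for an options bitset composed with `+` / `-`.
// Constant names (KEYEXPRS, SUBSCRIBERS, QUERYABLES) are hypothetical.
use core::ops::{Add, Sub};

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Options(u8);

impl Options {
    const KEYEXPRS: Options = Options(1 << 0);
    const SUBSCRIBERS: Options = Options(1 << 1);
    const QUERYABLES: Options = Options(1 << 2);

    fn contains(self, other: Options) -> bool {
        self.0 & other.0 == other.0
    }
}

impl Add for Options {
    type Output = Options;
    fn add(self, rhs: Options) -> Options {
        Options(self.0 | rhs.0) // adding an interest sets its option bits
    }
}

impl Sub for Options {
    type Output = Options;
    fn sub(self, rhs: Options) -> Options {
        Options(self.0 & !rhs.0) // removing an interest clears its option bits
    }
}

fn main() {
    let interest = Options::KEYEXPRS + Options::SUBSCRIBERS;
    assert!(interest.contains(Options::SUBSCRIBERS));
    let interest = interest - Options::SUBSCRIBERS;
    assert!(!interest.contains(Options::SUBSCRIBERS) && interest.contains(Options::KEYEXPRS));
}
```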
@@ -784,9 +804,7 @@ pub mod interest { /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ - /// |Z|F|X| D_INT | - /// +---------------+ - /// ~ intst_id:z32 ~ + /// |Z|X|X| D_INT | /// +---------------+ /// |A|M|N|R|T|Q|S|K| (*) /// +---------------+ @@ -809,7 +827,6 @@ pub mod interest { /// ``` #[derive(Debug, Clone, PartialEq, Eq)] pub struct DeclareInterest { - pub id: InterestId, pub interest: Interest, pub wire_expr: Option>, } @@ -834,12 +851,10 @@ pub mod interest { use rand::Rng; let mut rng = rand::thread_rng(); - let id: InterestId = rng.gen(); let wire_expr = rng.gen_bool(0.5).then_some(WireExpr::rand()); let interest = Interest::rand(); Self { - id, wire_expr, interest, } @@ -848,7 +863,6 @@ pub mod interest { #[derive(Clone, Copy)] pub struct Interest { - flags: u8, options: u8, } @@ -870,14 +884,11 @@ pub mod interest { ); const fn options(options: u8) -> Self { - Self { flags: 0, options } + Self { options } } pub const fn empty() -> Self { - Self { - flags: 0, - options: 0, - } + Self { options: 0 } } pub const fn keyexprs(&self) -> bool { @@ -982,17 +993,17 @@ pub mod interest { impl Add for Interest { type Output = Self; + #[allow(clippy::suspicious_arithmetic_impl)] // Allows to implement Add & Sub for Interest fn add(self, rhs: Self) -> Self::Output { Self { - flags: self.flags | rhs.flags, options: self.options | rhs.options, } } } impl AddAssign for Interest { + #[allow(clippy::suspicious_op_assign_impl)] // Allows to implement Add & Sub for Interest fn add_assign(&mut self, rhs: Self) { - self.flags |= rhs.flags; self.options |= rhs.options; } } @@ -1002,7 +1013,6 @@ pub mod interest { fn sub(self, rhs: Self) -> Self::Output { Self { - flags: self.flags & !rhs.flags, options: self.options & !rhs.options, } } @@ -1010,15 +1020,13 @@ pub mod interest { impl SubAssign for Interest { fn sub_assign(&mut self, rhs: Self) { - self.flags &= !rhs.flags; self.options &= !rhs.options; } } - impl From<(u8, u8)> for Interest { - fn from(value: (u8, u8)) -> Self { - let (flags, options) = value; - Self { flags, options } + impl From for Interest { + fn from(options: u8) -> Self { + Self { options } } } From eb1a80ac9ddc7c15942238e477993825f559cd17 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 5 Apr 2024 14:47:11 +0200 Subject: [PATCH 129/357] Fix use and unstable visibility --- zenoh/src/prelude.rs | 5 ++++- zenoh/src/queryable.rs | 3 +-- zenoh/src/sample/builder.rs | 8 +++----- zenoh/src/session.rs | 1 + zenoh/tests/qos.rs | 1 - zenoh/tests/routing.rs | 1 - 6 files changed, 9 insertions(+), 10 deletions(-) diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 850148f506..2e95e8d908 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -62,8 +62,11 @@ pub(crate) mod common { pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; pub use crate::sample::builder::{ - QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, + QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, }; + + #[zenoh_macros::unstable] + pub use crate::sample::builder::SampleBuilderTrait; } /// Prelude to import when using Zenoh's sync API. 
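With the prelude changes above, the stable builder traits (`QoSBuilderTrait`, `TimestampBuilderTrait`, `ValueBuilderTrait`) now come in through the prelude glob, while `SampleBuilderTrait` is only re-exported behind the `unstable` feature. A sketch adapted from the rustdoc examples elsewhere in this patch series, assuming (as those examples do) that the async prelude also provides `Encoding` and `CongestionControl`:

```rust
use zenoh::prelude::r#async::*; // re-exports the stable builder traits

#[tokio::main]
async fn main() {
    let session = zenoh::open(config::peer()).res().await.unwrap();
    session
        .put("key/expression", "payload")
        .encoding(Encoding::TEXT_PLAIN)               // ValueBuilderTrait
        .congestion_control(CongestionControl::Block) // QoSBuilderTrait
        .res()
        .await
        .unwrap();
}
```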
diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index c2fd67fcf4..794ff3a504 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -18,13 +18,12 @@ use crate::encoding::Encoding; use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; -use crate::sample::builder::SampleBuilder; use crate::sample::{QoSBuilder, SourceInfo}; use crate::Id; use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] -use crate::{query::ReplyKeyExpr, sample::Attachment}; +use crate::{query::ReplyKeyExpr, sample::Attachment, sample::builder::SampleBuilder}; use std::fmt; use std::future::Ready; use std::ops::Deref; diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index 5fab36617d..fca55edd09 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -15,11 +15,8 @@ use std::marker::PhantomData; #[cfg(feature = "unstable")] -use crate::sample::Attachment; -use crate::sample::QoS; -use crate::sample::QoSBuilder; -#[cfg(feature = "unstable")] -use crate::sample::SourceInfo; +use crate::sample::{Attachment, SourceInfo}; +use crate::sample::{QoS, QoSBuilder}; use crate::Encoding; use crate::KeyExpr; use crate::Payload; @@ -47,6 +44,7 @@ pub trait TimestampBuilderTrait { fn timestamp>>(self, timestamp: T) -> Self; } +#[zenoh_macros::unstable] pub trait SampleBuilderTrait { /// Attach source information #[zenoh_macros::unstable] diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index d9427f270c..67bec5f488 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -41,6 +41,7 @@ use crate::Priority; use crate::Sample; use crate::SampleKind; use crate::Selector; +#[cfg(feature = "unstable")] use crate::SourceInfo; use crate::Value; use log::{error, trace, warn}; diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 5fd3edd985..1885c316ea 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -13,7 +13,6 @@ // use std::time::Duration; use zenoh::prelude::r#async::*; -use zenoh::sample::builder::QoSBuilderTrait; use zenoh::{publication::Priority, SessionDeclarations}; use zenoh_core::ztimeout; diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 1031630a68..56bacd7fdd 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -18,7 +18,6 @@ use std::time::Duration; use tokio_util::{sync::CancellationToken, task::TaskTracker}; use zenoh::config::{Config, ModeDependentValue}; use zenoh::prelude::r#async::*; -use zenoh::sample::builder::QoSBuilderTrait; use zenoh::Result; use zenoh_core::ztimeout; use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher}; From a43e4518875f1c4be314943e78fdc483ae6f9844 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 5 Apr 2024 14:50:16 +0200 Subject: [PATCH 130/357] Add payload and encoding accessors for Query --- zenoh/src/queryable.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 794ff3a504..6fbb4e9090 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -23,7 +23,7 @@ use crate::Id; use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] -use crate::{query::ReplyKeyExpr, sample::Attachment, sample::builder::SampleBuilder}; +use crate::{query::ReplyKeyExpr, sample::builder::SampleBuilder, sample::Attachment}; use std::fmt; use std::future::Ready; use std::ops::Deref; @@ -97,6 +97,18 @@ impl Query { self.inner.value.as_ref() } + /// This Query's payload. 
+ #[inline(always)] + pub fn payload(&self) -> Option<&Payload> { + self.inner.value.as_ref().map(|v| &v.payload) + } + + /// This Query's encoding. + #[inline(always)] + pub fn encoding(&self) -> Option<&Encoding> { + self.inner.value.as_ref().map(|v| &v.encoding) + } + #[zenoh_macros::unstable] pub fn attachment(&self) -> Option<&Attachment> { self.inner.attachment.as_ref() From 1ad8c84c8b3f2f0f93f5dadb3a190af198e4e289 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 5 Apr 2024 14:52:08 +0200 Subject: [PATCH 131/357] cargo fmt --all --- zenoh/src/prelude.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 2e95e8d908..e2327c0dcc 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -61,9 +61,7 @@ pub(crate) mod common { pub use crate::publication::PublisherDeclarations; pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; - pub use crate::sample::builder::{ - QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, - }; + pub use crate::sample::builder::{QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait}; #[zenoh_macros::unstable] pub use crate::sample::builder::SampleBuilderTrait; From 5ee2bdb26780926b381d9bb93f0d52a262b06488 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 5 Apr 2024 15:16:12 +0200 Subject: [PATCH 132/357] Declare message can be Push/Request/RequestContinuous/Response 3 (#908) * Declare message can be Push/Request/RequestContinuous/Response * Address review comments * Remove F: Future flag from DeclareInterest * cargo fmt --all * Remove unused Interest flags field * Update doc * Remove unneeded interest_id field * Update commons/zenoh-protocol/src/network/declare.rs * Remove unused UndeclareInterest --------- Co-authored-by: Luca Cominardi --- commons/zenoh-protocol/src/network/declare.rs | 34 ------------------- commons/zenoh-protocol/src/network/mod.rs | 4 +-- 2 files changed, 2 insertions(+), 36 deletions(-) diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index 6cd2b2200f..31e8adcc6e 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -1029,38 +1029,4 @@ pub mod interest { Self { options } } } - - /// ```text - /// Flags: - /// - X: Reserved - /// - X: Reserved - /// - Z: Extension If Z==1 then at least one extension is present - /// - /// 7 6 5 4 3 2 1 0 - /// +-+-+-+-+-+-+-+-+ - /// |Z|X|X| U_INT | - /// +---------------+ - /// ~ intst_id:z32 ~ - /// +---------------+ - /// ~ [decl_exts] ~ if Z==1 - /// +---------------+ - /// ``` - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct UndeclareInterest { - pub id: InterestId, - pub ext_wire_expr: common::ext::WireExprType, - } - - impl UndeclareInterest { - #[cfg(feature = "test")] - pub fn rand() -> Self { - use rand::Rng; - let mut rng = rand::thread_rng(); - - let id: InterestId = rng.gen(); - let ext_wire_expr = common::ext::WireExprType::rand(); - - Self { id, ext_wire_expr } - } - } } diff --git a/commons/zenoh-protocol/src/network/mod.rs b/commons/zenoh-protocol/src/network/mod.rs index cbf9894aef..e60388f425 100644 --- a/commons/zenoh-protocol/src/network/mod.rs +++ b/commons/zenoh-protocol/src/network/mod.rs @@ -21,8 +21,8 @@ use core::fmt; pub use declare::{ Declare, DeclareBody, DeclareFinal, DeclareInterest, DeclareKeyExpr, DeclareMode, - DeclareQueryable, DeclareSubscriber, DeclareToken, UndeclareInterest, UndeclareKeyExpr, - UndeclareQueryable, 
UndeclareSubscriber, UndeclareToken, + DeclareQueryable, DeclareSubscriber, DeclareToken, UndeclareKeyExpr, UndeclareQueryable, + UndeclareSubscriber, UndeclareToken, }; pub use oam::Oam; pub use push::Push; From 518816c47dbd3d885d60986b51fa185cfd9cd6a7 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 5 Apr 2024 17:33:27 +0200 Subject: [PATCH 133/357] publication builders in builders api --- examples/examples/z_formats.rs | 2 +- examples/examples/z_scout.rs | 2 +- .../src/replica/align_queryable.rs | 2 +- .../src/replica/mod.rs | 2 +- zenoh-ext/src/group.rs | 2 +- zenoh/src/api/builders.rs | 1 + zenoh/src/api/builders/publication.rs | 437 ++++++++++++ zenoh/src/api/key_expr.rs | 2 +- zenoh/src/api/publication.rs | 622 +++--------------- zenoh/src/api/session.rs | 6 +- zenoh/src/lib.rs | 6 +- zenoh/src/prelude.rs | 4 +- 12 files changed, 560 insertions(+), 528 deletions(-) create mode 100644 zenoh/src/api/builders/publication.rs diff --git a/examples/examples/z_formats.rs b/examples/examples/z_formats.rs index 69313f0e56..eab5aa035a 100644 --- a/examples/examples/z_formats.rs +++ b/examples/examples/z_formats.rs @@ -12,9 +12,9 @@ // ZettaScale Zenoh Team, // -use zenoh::key_expr::keyexpr; use zenoh::key_expr::kedefine; use zenoh::key_expr::keformat; +use zenoh::key_expr::keyexpr; kedefine!( pub file_format: "user_id/${user_id:*}/file/${file:*/**}", diff --git a/examples/examples/z_scout.rs b/examples/examples/z_scout.rs index 11ed3a6fd8..a46b7c49fe 100644 --- a/examples/examples/z_scout.rs +++ b/examples/examples/z_scout.rs @@ -13,8 +13,8 @@ // use zenoh::config::Config; use zenoh::prelude::r#async::*; -use zenoh::scouting::WhatAmI; use zenoh::scouting::scout; +use zenoh::scouting::WhatAmI; #[tokio::main] async fn main() { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 8654927f9f..5908778867 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -20,8 +20,8 @@ use std::str; use std::str::FromStr; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; -use zenoh::time::Timestamp; use zenoh::session::Session; +use zenoh::time::Timestamp; pub struct AlignQueryable { session: Arc, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index 9a4fd35a11..77741f43fc 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -28,8 +28,8 @@ use std::time::{Duration, SystemTime}; use urlencoding::encode; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; -use zenoh::time::Timestamp; use zenoh::session::Session; +use zenoh::time::Timestamp; use zenoh_backend_traits::config::{ReplicaConfig, StorageConfig}; pub mod align_queryable; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index f74d9d547a..aa03571f6f 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -29,9 +29,9 @@ use zenoh::payload::PayloadReader; use zenoh::prelude::r#async::*; use zenoh::publication::Publisher; use zenoh::query::ConsolidationMode; +use zenoh::session::Session; use zenoh::Error as ZError; use zenoh::Result as ZResult; -use zenoh::session::Session; use zenoh_result::bail; use zenoh_sync::Condition; diff --git a/zenoh/src/api/builders.rs b/zenoh/src/api/builders.rs index 09d12657a5..94dbda2dd0 100644 --- 
a/zenoh/src/api/builders.rs +++ b/zenoh/src/api/builders.rs @@ -12,4 +12,5 @@ // ZettaScale Zenoh Team, // +pub(crate) mod publication; pub(crate) mod sample; diff --git a/zenoh/src/api/builders/publication.rs b/zenoh/src/api/builders/publication.rs new file mode 100644 index 0000000000..50a8c6ab42 --- /dev/null +++ b/zenoh/src/api/builders/publication.rs @@ -0,0 +1,437 @@ +use std::future::Ready; + +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::api::builders::sample::SampleBuilderTrait; +use crate::api::builders::sample::{QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait}; +use crate::api::key_expr::KeyExpr; +use crate::api::publication::Priority; +use crate::api::sample::Locality; +use crate::api::sample::SampleKind; +#[cfg(feature = "unstable")] +use crate::api::sample::SourceInfo; +use crate::api::session::SessionRef; +use crate::api::value::Value; +use crate::api::{ + encoding::Encoding, payload::Payload, publication::Publisher, sample::Attachment, +}; +use zenoh_core::{AsyncResolve, Resolvable, Result as ZResult, SyncResolve}; +use zenoh_protocol::core::CongestionControl; +use zenoh_protocol::network::Mapping; + +pub type SessionPutBuilder<'a, 'b> = + PublicationBuilder, PublicationBuilderPut>; + +pub type SessionDeleteBuilder<'a, 'b> = + PublicationBuilder, PublicationBuilderDelete>; + +pub type PublisherPutBuilder<'a> = PublicationBuilder<&'a Publisher<'a>, PublicationBuilderPut>; + +pub type PublisherDeleteBuilder<'a> = + PublicationBuilder<&'a Publisher<'a>, PublicationBuilderDelete>; + +#[derive(Debug, Clone)] +pub struct PublicationBuilderPut { + pub(crate) payload: Payload, + pub(crate) encoding: Encoding, +} +#[derive(Debug, Clone)] +pub struct PublicationBuilderDelete; + +/// A builder for initializing [`Session::put`](crate::session::Session::put), [`Session::delete`](crate::session::Session::delete), +/// [`Publisher::put`](crate::publication::Publisher::put), and [`Publisher::delete`](crate::publication::Publisher::delete) operations. 
+/// +/// # Examples +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::prelude::r#async::*; +/// use zenoh::publication::CongestionControl; +/// use zenoh::sample::builder::{ValueBuilderTrait, QoSBuilderTrait}; +/// +/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// session +/// .put("key/expression", "payload") +/// .encoding(Encoding::TEXT_PLAIN) +/// .congestion_control(CongestionControl::Block) +/// .res() +/// .await +/// .unwrap(); +/// # } +/// ``` +#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +#[derive(Debug, Clone)] +pub struct PublicationBuilder { + pub(crate) publisher: P, + pub(crate) kind: T, + pub(crate) timestamp: Option, + #[cfg(feature = "unstable")] + pub(crate) source_info: SourceInfo, + #[cfg(feature = "unstable")] + pub(crate) attachment: Option, +} + +impl QoSBuilderTrait for PublicationBuilder, T> { + #[inline] + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + Self { + publisher: self.publisher.congestion_control(congestion_control), + ..self + } + } + #[inline] + fn priority(self, priority: Priority) -> Self { + Self { + publisher: self.publisher.priority(priority), + ..self + } + } + #[inline] + fn express(self, is_express: bool) -> Self { + Self { + publisher: self.publisher.express(is_express), + ..self + } + } +} + +impl PublicationBuilder, T> { + /// Restrict the matching subscribers that will receive the published data + /// to the ones that have the given [`Locality`](crate::prelude::Locality). + #[zenoh_macros::unstable] + #[inline] + pub fn allowed_destination(mut self, destination: Locality) -> Self { + self.publisher = self.publisher.allowed_destination(destination); + self + } +} + +impl
<P>
ValueBuilderTrait for PublicationBuilder { + fn encoding>(self, encoding: T) -> Self { + Self { + kind: PublicationBuilderPut { + encoding: encoding.into(), + ..self.kind + }, + ..self + } + } + + fn payload(self, payload: IntoPayload) -> Self + where + IntoPayload: Into, + { + Self { + kind: PublicationBuilderPut { + payload: payload.into(), + ..self.kind + }, + ..self + } + } + fn value>(self, value: T) -> Self { + let Value { payload, encoding } = value.into(); + Self { + kind: PublicationBuilderPut { payload, encoding }, + ..self + } + } +} + +impl SampleBuilderTrait for PublicationBuilder { + #[cfg(feature = "unstable")] + fn source_info(self, source_info: SourceInfo) -> Self { + Self { + source_info, + ..self + } + } + #[cfg(feature = "unstable")] + fn attachment>>(self, attachment: TA) -> Self { + Self { + attachment: attachment.into(), + ..self + } + } +} + +impl TimestampBuilderTrait for PublicationBuilder { + fn timestamp>>(self, timestamp: TS) -> Self { + Self { + timestamp: timestamp.into(), + ..self + } + } +} + +impl Resolvable for PublicationBuilder { + type To = ZResult<()>; +} + +impl SyncResolve for PublicationBuilder, PublicationBuilderPut> { + #[inline] + fn res_sync(self) -> ::To { + let publisher = self.publisher.create_one_shot_publisher()?; + publisher.resolve_put( + self.kind.payload, + SampleKind::Put, + self.kind.encoding, + self.timestamp, + #[cfg(feature = "unstable")] + self.source_info, + #[cfg(feature = "unstable")] + self.attachment, + ) + } +} + +impl SyncResolve for PublicationBuilder, PublicationBuilderDelete> { + #[inline] + fn res_sync(self) -> ::To { + let publisher = self.publisher.create_one_shot_publisher()?; + publisher.resolve_put( + Payload::empty(), + SampleKind::Delete, + Encoding::ZENOH_BYTES, + self.timestamp, + #[cfg(feature = "unstable")] + self.source_info, + #[cfg(feature = "unstable")] + self.attachment, + ) + } +} + +impl AsyncResolve for PublicationBuilder, PublicationBuilderPut> { + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} + +impl AsyncResolve for PublicationBuilder, PublicationBuilderDelete> { + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} + +/// A builder for initializing a [`Publisher`]. 
+/// +/// # Examples +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::prelude::r#async::*; +/// use zenoh::publication::CongestionControl; +/// use zenoh::sample::builder::QoSBuilderTrait; +/// +/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let publisher = session +/// .declare_publisher("key/expression") +/// .congestion_control(CongestionControl::Block) +/// .res() +/// .await +/// .unwrap(); +/// # } +/// ``` +#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +#[derive(Debug)] +pub struct PublisherBuilder<'a, 'b: 'a> { + pub(crate) session: SessionRef<'a>, + pub(crate) key_expr: ZResult>, + pub(crate) congestion_control: CongestionControl, + pub(crate) priority: Priority, + pub(crate) is_express: bool, + pub(crate) destination: Locality, +} + +impl<'a, 'b> Clone for PublisherBuilder<'a, 'b> { + fn clone(&self) -> Self { + Self { + session: self.session.clone(), + key_expr: match &self.key_expr { + Ok(k) => Ok(k.clone()), + Err(e) => Err(zerror!("Cloned KE Error: {}", e).into()), + }, + congestion_control: self.congestion_control, + priority: self.priority, + is_express: self.is_express, + destination: self.destination, + } + } +} + +impl QoSBuilderTrait for PublisherBuilder<'_, '_> { + /// Change the `congestion_control` to apply when routing the data. + #[inline] + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + Self { + congestion_control, + ..self + } + } + + /// Change the priority of the written data. + #[inline] + fn priority(self, priority: Priority) -> Self { + Self { priority, ..self } + } + + /// Change the `express` policy to apply when routing the data. + /// When express is set to `true`, then the message will not be batched. + /// This usually has a positive impact on latency but negative impact on throughput. + #[inline] + fn express(self, is_express: bool) -> Self { + Self { is_express, ..self } + } +} + +impl<'a, 'b> PublisherBuilder<'a, 'b> { + /// Restrict the matching subscribers that will receive the published data + /// to the ones that have the given [`Locality`](crate::prelude::Locality). + #[zenoh_macros::unstable] + #[inline] + pub fn allowed_destination(mut self, destination: Locality) -> Self { + self.destination = destination; + self + } + + // internal function for perfroming the publication + fn create_one_shot_publisher(self) -> ZResult> { + Ok(Publisher { + session: self.session, + #[cfg(feature = "unstable")] + eid: 0, // This is a one shot Publisher + key_expr: self.key_expr?, + congestion_control: self.congestion_control, + priority: self.priority, + is_express: self.is_express, + destination: self.destination, + }) + } +} + +impl<'a, 'b> Resolvable for PublisherBuilder<'a, 'b> { + type To = ZResult>; +} + +impl<'a, 'b> SyncResolve for PublisherBuilder<'a, 'b> { + fn res_sync(self) -> ::To { + let mut key_expr = self.key_expr?; + if !key_expr.is_fully_optimized(&self.session) { + let session_id = self.session.id; + let expr_id = self.session.declare_prefix(key_expr.as_str()).res_sync(); + let prefix_len = key_expr + .len() + .try_into() + .expect("How did you get a key expression with a length over 2^32!?"); + key_expr = match key_expr.0 { + crate::api::key_expr::KeyExprInner::Borrowed(key_expr) + | crate::api::key_expr::KeyExprInner::BorrowedWire { key_expr, .. 
} => { + KeyExpr(crate::api::key_expr::KeyExprInner::BorrowedWire { + key_expr, + expr_id, + mapping: Mapping::Sender, + prefix_len, + session_id, + }) + } + crate::api::key_expr::KeyExprInner::Owned(key_expr) + | crate::api::key_expr::KeyExprInner::Wire { key_expr, .. } => { + KeyExpr(crate::api::key_expr::KeyExprInner::Wire { + key_expr, + expr_id, + mapping: Mapping::Sender, + prefix_len, + session_id, + }) + } + } + } + self.session + .declare_publication_intent(key_expr.clone()) + .res_sync()?; + #[cfg(feature = "unstable")] + let eid = self.session.runtime.next_id(); + let publisher = Publisher { + session: self.session, + #[cfg(feature = "unstable")] + eid, + key_expr, + congestion_control: self.congestion_control, + priority: self.priority, + is_express: self.is_express, + destination: self.destination, + }; + log::trace!("publish({:?})", publisher.key_expr); + Ok(publisher) + } +} + +impl<'a, 'b> AsyncResolve for PublisherBuilder<'a, 'b> { + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} + +impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { + fn res_sync(self) -> ::To { + self.publisher.resolve_put( + self.kind.payload, + SampleKind::Put, + self.kind.encoding, + self.timestamp, + #[cfg(feature = "unstable")] + self.source_info, + #[cfg(feature = "unstable")] + self.attachment, + ) + } +} + +impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { + fn res_sync(self) -> ::To { + self.publisher.resolve_put( + Payload::empty(), + SampleKind::Delete, + Encoding::ZENOH_BYTES, + self.timestamp, + #[cfg(feature = "unstable")] + self.source_info, + #[cfg(feature = "unstable")] + self.attachment, + ) + } +} + +impl AsyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} + +impl AsyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index 47d3a71c56..b6148ded41 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -57,7 +57,7 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; -use crate::{net::primitives::Primitives, prelude::Selector, Session, api::session::Undeclarable}; +use crate::{api::session::Undeclarable, net::primitives::Primitives, prelude::Selector, Session}; #[derive(Clone, Debug)] pub(crate) enum KeyExprInner<'a> { diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 2b0f813e72..a065685fa1 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -13,6 +13,10 @@ // //! Publishing primitives. 
+use crate::api::builders::publication::{ + PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, PublisherDeleteBuilder, + PublisherPutBuilder, +}; #[zenoh_macros::unstable] use crate::api::sample::Attachment; use crate::api::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; @@ -25,226 +29,20 @@ use crate::{ api::handlers::{Callback, DefaultHandler, IntoHandler}, Id, }; +use futures::Sink; +use std::convert::TryFrom; use std::future::Ready; +use std::pin::Pin; +use std::task::{Context, Poll}; use zenoh_core::{zread, AsyncResolve, Resolvable, Resolve, SyncResolve}; +pub use zenoh_protocol::core::CongestionControl; use zenoh_protocol::network::push::ext; -use zenoh_protocol::network::Mapping; use zenoh_protocol::network::Push; use zenoh_protocol::zenoh::Del; use zenoh_protocol::zenoh::PushBody; use zenoh_protocol::zenoh::Put; -use zenoh_result::ZResult; - -/// The kind of congestion control. -pub use zenoh_protocol::core::CongestionControl; - -#[derive(Debug, Clone)] -pub struct PublicationBuilderPut { - pub(crate) payload: Payload, - pub(crate) encoding: Encoding, -} -#[derive(Debug, Clone)] -pub struct PublicationBuilderDelete; - -/// A builder for initializing [`Session::put`](crate::Session::put), [`Session::delete`](crate::Session::delete), -/// [`Publisher::put`](crate::Publisher::put), and [`Publisher::delete`](crate::Publisher::delete) operations. -/// -/// # Examples -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use zenoh::prelude::r#async::*; -/// use zenoh::publication::CongestionControl; -/// use zenoh::sample::builder::{ValueBuilderTrait, QoSBuilderTrait}; -/// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// session -/// .put("key/expression", "payload") -/// .encoding(Encoding::TEXT_PLAIN) -/// .congestion_control(CongestionControl::Block) -/// .res() -/// .await -/// .unwrap(); -/// # } -/// ``` -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -#[derive(Debug, Clone)] -pub struct PublicationBuilder { - pub(crate) publisher: P, - pub(crate) kind: T, - pub(crate) timestamp: Option, - #[cfg(feature = "unstable")] - pub(crate) source_info: SourceInfo, - #[cfg(feature = "unstable")] - pub(crate) attachment: Option, -} - -pub type SessionPutBuilder<'a, 'b> = - PublicationBuilder, PublicationBuilderPut>; - -pub type SessionDeleteBuilder<'a, 'b> = - PublicationBuilder, PublicationBuilderDelete>; - -pub type PublisherPutBuilder<'a> = PublicationBuilder<&'a Publisher<'a>, PublicationBuilderPut>; - -pub type PublisherDeleteBuilder<'a> = - PublicationBuilder<&'a Publisher<'a>, PublicationBuilderDelete>; - -impl QoSBuilderTrait for PublicationBuilder, T> { - #[inline] - fn congestion_control(self, congestion_control: CongestionControl) -> Self { - Self { - publisher: self.publisher.congestion_control(congestion_control), - ..self - } - } - #[inline] - fn priority(self, priority: Priority) -> Self { - Self { - publisher: self.publisher.priority(priority), - ..self - } - } - #[inline] - fn express(self, is_express: bool) -> Self { - Self { - publisher: self.publisher.express(is_express), - ..self - } - } -} - -impl PublicationBuilder, T> { - /// Restrict the matching subscribers that will receive the published data - /// to the ones that have the given [`Locality`](crate::prelude::Locality). 
- #[zenoh_macros::unstable] - #[inline] - pub fn allowed_destination(mut self, destination: Locality) -> Self { - self.publisher = self.publisher.allowed_destination(destination); - self - } -} - -impl
<P>
ValueBuilderTrait for PublicationBuilder { - fn encoding>(self, encoding: T) -> Self { - Self { - kind: PublicationBuilderPut { - encoding: encoding.into(), - ..self.kind - }, - ..self - } - } - - fn payload(self, payload: IntoPayload) -> Self - where - IntoPayload: Into, - { - Self { - kind: PublicationBuilderPut { - payload: payload.into(), - ..self.kind - }, - ..self - } - } - fn value>(self, value: T) -> Self { - let Value { payload, encoding } = value.into(); - Self { - kind: PublicationBuilderPut { payload, encoding }, - ..self - } - } -} - -impl SampleBuilderTrait for PublicationBuilder { - #[cfg(feature = "unstable")] - fn source_info(self, source_info: SourceInfo) -> Self { - Self { - source_info, - ..self - } - } - #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: TA) -> Self { - Self { - attachment: attachment.into(), - ..self - } - } -} - -impl TimestampBuilderTrait for PublicationBuilder { - fn timestamp>>(self, timestamp: TS) -> Self { - Self { - timestamp: timestamp.into(), - ..self - } - } -} - -impl Resolvable for PublicationBuilder { - type To = ZResult<()>; -} - -impl SyncResolve for PublicationBuilder, PublicationBuilderPut> { - #[inline] - fn res_sync(self) -> ::To { - let publisher = self.publisher.create_one_shot_publisher()?; - resolve_put( - &publisher, - self.kind.payload, - SampleKind::Put, - self.kind.encoding, - self.timestamp, - #[cfg(feature = "unstable")] - self.source_info, - #[cfg(feature = "unstable")] - self.attachment, - ) - } -} - -impl SyncResolve for PublicationBuilder, PublicationBuilderDelete> { - #[inline] - fn res_sync(self) -> ::To { - let publisher = self.publisher.create_one_shot_publisher()?; - resolve_put( - &publisher, - Payload::empty(), - SampleKind::Delete, - Encoding::ZENOH_BYTES, - self.timestamp, - #[cfg(feature = "unstable")] - self.source_info, - #[cfg(feature = "unstable")] - self.attachment, - ) - } -} - -impl AsyncResolve for PublicationBuilder, PublicationBuilderPut> { - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - -impl AsyncResolve for PublicationBuilder, PublicationBuilderDelete> { - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - -use futures::Sink; -use std::convert::TryFrom; -use std::convert::TryInto; -use std::pin::Pin; -use std::task::{Context, Poll}; use zenoh_result::Error; +use zenoh_result::ZResult; #[zenoh_macros::unstable] #[derive(Clone)] @@ -686,54 +484,6 @@ impl Drop for Publisher<'_> { } } -impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { - fn res_sync(self) -> ::To { - resolve_put( - self.publisher, - self.kind.payload, - SampleKind::Put, - self.kind.encoding, - self.timestamp, - #[cfg(feature = "unstable")] - self.source_info, - #[cfg(feature = "unstable")] - self.attachment, - ) - } -} - -impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { - fn res_sync(self) -> ::To { - resolve_put( - self.publisher, - Payload::empty(), - SampleKind::Delete, - Encoding::ZENOH_BYTES, - self.timestamp, - #[cfg(feature = "unstable")] - self.source_info, - #[cfg(feature = "unstable")] - self.attachment, - ) - } -} - -impl AsyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - -impl AsyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { - type Future = Ready; - - fn 
res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - impl<'a> Sink for Publisher<'a> { type Error = Error; @@ -752,8 +502,7 @@ impl<'a> Sink for Publisher<'a> { attachment, .. } = item.into(); - resolve_put( - &self, + self.resolve_put( payload, kind, encoding, @@ -776,267 +525,108 @@ impl<'a> Sink for Publisher<'a> { } } -/// A builder for initializing a [`Publisher`]. -/// -/// # Examples -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use zenoh::prelude::r#async::*; -/// use zenoh::publication::CongestionControl; -/// use zenoh::sample::builder::QoSBuilderTrait; -/// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let publisher = session -/// .declare_publisher("key/expression") -/// .congestion_control(CongestionControl::Block) -/// .res() -/// .await -/// .unwrap(); -/// # } -/// ``` -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -#[derive(Debug)] -pub struct PublisherBuilder<'a, 'b: 'a> { - pub(crate) session: SessionRef<'a>, - pub(crate) key_expr: ZResult>, - pub(crate) congestion_control: CongestionControl, - pub(crate) priority: Priority, - pub(crate) is_express: bool, - pub(crate) destination: Locality, -} - -impl<'a, 'b> Clone for PublisherBuilder<'a, 'b> { - fn clone(&self) -> Self { - Self { - session: self.session.clone(), - key_expr: match &self.key_expr { - Ok(k) => Ok(k.clone()), - Err(e) => Err(zerror!("Cloned KE Error: {}", e).into()), - }, - congestion_control: self.congestion_control, - priority: self.priority, - is_express: self.is_express, - destination: self.destination, - } - } -} - -impl QoSBuilderTrait for PublisherBuilder<'_, '_> { - /// Change the `congestion_control` to apply when routing the data. - #[inline] - fn congestion_control(self, congestion_control: CongestionControl) -> Self { - Self { - congestion_control, - ..self - } - } - - /// Change the priority of the written data. - #[inline] - fn priority(self, priority: Priority) -> Self { - Self { priority, ..self } - } - - /// Change the `express` policy to apply when routing the data. - /// When express is set to `true`, then the message will not be batched. - /// This usually has a positive impact on latency but negative impact on throughput. - #[inline] - fn express(self, is_express: bool) -> Self { - Self { is_express, ..self } - } -} - -impl<'a, 'b> PublisherBuilder<'a, 'b> { - /// Restrict the matching subscribers that will receive the published data - /// to the ones that have the given [`Locality`](crate::prelude::Locality). 
- #[zenoh_macros::unstable] - #[inline] - pub fn allowed_destination(mut self, destination: Locality) -> Self { - self.destination = destination; - self - } - - // internal function for perfroming the publication - fn create_one_shot_publisher(self) -> ZResult> { - Ok(Publisher { - session: self.session, - #[cfg(feature = "unstable")] - eid: 0, // This is a one shot Publisher - key_expr: self.key_expr?, - congestion_control: self.congestion_control, - priority: self.priority, - is_express: self.is_express, - destination: self.destination, - }) - } -} - -impl<'a, 'b> Resolvable for PublisherBuilder<'a, 'b> { - type To = ZResult>; -} - -impl<'a, 'b> SyncResolve for PublisherBuilder<'a, 'b> { - fn res_sync(self) -> ::To { - let mut key_expr = self.key_expr?; - if !key_expr.is_fully_optimized(&self.session) { - let session_id = self.session.id; - let expr_id = self.session.declare_prefix(key_expr.as_str()).res_sync(); - let prefix_len = key_expr - .len() - .try_into() - .expect("How did you get a key expression with a length over 2^32!?"); - key_expr = match key_expr.0 { - crate::api::key_expr::KeyExprInner::Borrowed(key_expr) - | crate::api::key_expr::KeyExprInner::BorrowedWire { key_expr, .. } => { - KeyExpr(crate::api::key_expr::KeyExprInner::BorrowedWire { - key_expr, - expr_id, - mapping: Mapping::Sender, - prefix_len, - session_id, - }) - } - crate::api::key_expr::KeyExprInner::Owned(key_expr) - | crate::api::key_expr::KeyExprInner::Wire { key_expr, .. } => { - KeyExpr(crate::api::key_expr::KeyExprInner::Wire { - key_expr, - expr_id, - mapping: Mapping::Sender, - prefix_len, - session_id, - }) - } - } - } - self.session - .declare_publication_intent(key_expr.clone()) - .res_sync()?; - #[cfg(feature = "unstable")] - let eid = self.session.runtime.next_id(); - let publisher = Publisher { - session: self.session, - #[cfg(feature = "unstable")] - eid, - key_expr, - congestion_control: self.congestion_control, - priority: self.priority, - is_express: self.is_express, - destination: self.destination, +impl Publisher<'_> { + pub(crate) fn resolve_put( + &self, + payload: Payload, + kind: SampleKind, + encoding: Encoding, + timestamp: Option, + #[cfg(feature = "unstable")] source_info: SourceInfo, + #[cfg(feature = "unstable")] attachment: Option, + ) -> ZResult<()> { + log::trace!("write({:?}, [...])", &self.key_expr); + let primitives = zread!(self.session.state) + .primitives + .as_ref() + .unwrap() + .clone(); + let timestamp = if timestamp.is_none() { + self.session.runtime.new_timestamp() + } else { + timestamp }; - log::trace!("publish({:?})", publisher.key_expr); - Ok(publisher) - } -} - -impl<'a, 'b> AsyncResolve for PublisherBuilder<'a, 'b> { - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - -fn resolve_put( - publisher: &Publisher<'_>, - payload: Payload, - kind: SampleKind, - encoding: Encoding, - timestamp: Option, - #[cfg(feature = "unstable")] source_info: SourceInfo, - #[cfg(feature = "unstable")] attachment: Option, -) -> ZResult<()> { - log::trace!("write({:?}, [...])", &publisher.key_expr); - let primitives = zread!(publisher.session.state) - .primitives - .as_ref() - .unwrap() - .clone(); - let timestamp = if timestamp.is_none() { - publisher.session.runtime.new_timestamp() - } else { - timestamp - }; - if publisher.destination != Locality::SessionLocal { - primitives.send_push(Push { - wire_expr: publisher.key_expr.to_wire(&publisher.session).to_owned(), - ext_qos: ext::QoSType::new( - publisher.priority.into(), - 
publisher.congestion_control, - publisher.is_express, - ), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - payload: match kind { - SampleKind::Put => { - #[allow(unused_mut)] - let mut ext_attachment = None; - #[cfg(feature = "unstable")] - { - if let Some(attachment) = attachment.clone() { - ext_attachment = Some(attachment.into()); + if self.destination != Locality::SessionLocal { + primitives.send_push(Push { + wire_expr: self.key_expr.to_wire(&self.session).to_owned(), + ext_qos: ext::QoSType::new( + self.priority.into(), + self.congestion_control, + self.is_express, + ), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + payload: match kind { + SampleKind::Put => { + #[allow(unused_mut)] + let mut ext_attachment = None; + #[cfg(feature = "unstable")] + { + if let Some(attachment) = attachment.clone() { + ext_attachment = Some(attachment.into()); + } } + PushBody::Put(Put { + timestamp, + encoding: encoding.clone().into(), + #[cfg(feature = "unstable")] + ext_sinfo: source_info.into(), + #[cfg(not(feature = "unstable"))] + ext_sinfo: None, + #[cfg(feature = "shared-memory")] + ext_shm: None, + ext_attachment, + ext_unknown: vec![], + payload: payload.clone().into(), + }) } - PushBody::Put(Put { - timestamp, - encoding: encoding.clone().into(), + SampleKind::Delete => { + #[allow(unused_mut)] + let mut ext_attachment = None; #[cfg(feature = "unstable")] - ext_sinfo: source_info.into(), - #[cfg(not(feature = "unstable"))] - ext_sinfo: None, - #[cfg(feature = "shared-memory")] - ext_shm: None, - ext_attachment, - ext_unknown: vec![], - payload: payload.clone().into(), - }) - } - SampleKind::Delete => { - #[allow(unused_mut)] - let mut ext_attachment = None; - #[cfg(feature = "unstable")] - { - if let Some(attachment) = attachment.clone() { - ext_attachment = Some(attachment.into()); + { + if let Some(attachment) = attachment.clone() { + ext_attachment = Some(attachment.into()); + } } + PushBody::Del(Del { + timestamp, + #[cfg(feature = "unstable")] + ext_sinfo: source_info.into(), + #[cfg(not(feature = "unstable"))] + ext_sinfo: None, + ext_attachment, + ext_unknown: vec![], + }) } - PushBody::Del(Del { - timestamp, - #[cfg(feature = "unstable")] - ext_sinfo: source_info.into(), - #[cfg(not(feature = "unstable"))] - ext_sinfo: None, - ext_attachment, - ext_unknown: vec![], - }) - } - }, - }); - } - if publisher.destination != Locality::Remote { - let data_info = DataInfo { - kind, - encoding: Some(encoding), - timestamp, - source_id: None, - source_sn: None, - qos: QoS::from(ext::QoSType::new( - publisher.priority.into(), - publisher.congestion_control, - publisher.is_express, - )), - }; - - publisher.session.handle_data( - true, - &publisher.key_expr.to_wire(&publisher.session), - Some(data_info), - payload.into(), - #[cfg(feature = "unstable")] - attachment, - ); + }, + }); + } + if self.destination != Locality::Remote { + let data_info = DataInfo { + kind, + encoding: Some(encoding), + timestamp, + source_id: None, + source_sn: None, + qos: QoS::from(ext::QoSType::new( + self.priority.into(), + self.congestion_control, + self.is_express, + )), + }; + + self.session.handle_data( + true, + &self.key_expr.to_wire(&self.session), + Some(data_info), + payload.into(), + #[cfg(feature = "unstable")] + attachment, + ); + } + Ok(()) } - Ok(()) } /// The Priority of zenoh messages. 
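Note on the hunk above: the former free function resolve_put() becomes an inherent method, Publisher::resolve_put(), so the put/delete builders and the Sink implementation now share one code path. The public surface is unchanged; a minimal usage sketch under the usual assumptions (tokio runtime, "key/expression" and the payload are placeholders):

    use zenoh::prelude::r#async::*;
    use zenoh::publication::CongestionControl;

    #[tokio::main]
    async fn main() {
        let session = zenoh::open(config::peer()).res().await.unwrap();
        let publisher = session
            .declare_publisher("key/expression")
            .congestion_control(CongestionControl::Block)
            .res()
            .await
            .unwrap();
        // Both calls below end up in Publisher::resolve_put() internally:
        // a Put carrying the given payload, then a Delete with an empty payload.
        publisher.put(String::from("value")).res().await.unwrap();
        publisher.delete().res().await.unwrap();
    }
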
diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 5793794815..61ac272039 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -12,6 +12,9 @@ // ZettaScale Zenoh Team, // use crate::api::admin; +use crate::api::builders::publication::PublicationBuilder; +use crate::api::builders::publication::PublicationBuilderDelete; +use crate::api::builders::publication::PublicationBuilderPut; use crate::api::encoding::Encoding; use crate::api::handlers::{Callback, DefaultHandler}; use crate::api::info::SessionInfo; @@ -21,9 +24,6 @@ use crate::api::key_expr::KeyExprInner; use crate::api::liveliness::{Liveliness, LivelinessTokenState}; use crate::api::publication::MatchingListenerState; use crate::api::publication::MatchingStatus; -use crate::api::publication::PublicationBuilder; -use crate::api::publication::PublicationBuilderDelete; -use crate::api::publication::PublicationBuilderPut; use crate::api::query::GetBuilder; use crate::api::query::QueryState; use crate::api::query::Reply; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 3bb919c933..e2cb570a37 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -142,6 +142,8 @@ pub mod key_expr { } pub mod session { + pub use crate::api::builders::publication::SessionDeleteBuilder; + pub use crate::api::builders::publication::SessionPutBuilder; pub use crate::api::session::init; pub use crate::api::session::open; pub use crate::api::session::Session; @@ -159,6 +161,8 @@ pub mod sample { pub use crate::api::sample::Locality; pub use crate::api::sample::Sample; pub use crate::api::sample::SampleKind; + #[zenoh_macros::unstable] + pub use crate::api::sample::SourceInfo; } pub mod value { @@ -192,10 +196,10 @@ pub mod subscriber { } pub mod publication { + pub use crate::api::builders::publication::PublisherBuilder; pub use crate::api::publication::CongestionControl; pub use crate::api::publication::Priority; pub use crate::api::publication::Publisher; - pub use crate::api::publication::PublisherBuilder; #[zenoh_macros::unstable] pub use crate::api::publication::PublisherDeclarations; } diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index a0bec8d9ce..bffd9280b5 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -61,12 +61,12 @@ pub(crate) mod common { pub use crate::api::publication::PublisherDeclarations; pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; - pub use crate::api::sample::builder::{ + pub use crate::api::builders::sample::{ QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, }; #[zenoh_macros::unstable] - pub use crate::api::sample::builder::SampleBuilderTrait; + pub use crate::api::builders::sample::SampleBuilderTrait; } /// Prelude to import when using Zenoh's sync API. 
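The lib.rs and prelude hunks above only reshuffle re-exports: the publication builders move under api::builders::publication and surface through zenoh::session and zenoh::publication, while the sample builder traits keep reaching user code through the prelude. As a sanity sketch, the public paths a downstream crate would now compile against look like this (the paths are taken from the lib.rs hunk; everything else, including the unused-import allowance, is illustrative):

    #![allow(unused_imports)]

    use zenoh::prelude::r#async::*; // QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait
    use zenoh::publication::{CongestionControl, Priority, Publisher, PublisherBuilder};
    use zenoh::sample::{Sample, SampleKind}; // SourceInfo is also re-exported here behind the "unstable" feature
    use zenoh::session::{open, Session, SessionDeleteBuilder, SessionPutBuilder};
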
From c0b6751e625dec4e50ab1a1aceba059d280b3a72 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 5 Apr 2024 17:44:28 +0200 Subject: [PATCH 134/357] removed internal typedef Id from root lib.rs --- zenoh/src/api.rs | 2 ++ zenoh/src/api/liveliness.rs | 2 +- zenoh/src/api/publication.rs | 2 +- zenoh/src/api/queryable.rs | 2 +- zenoh/src/api/session.rs | 2 +- zenoh/src/api/subscriber.rs | 2 +- zenoh/src/lib.rs | 2 -- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index 44496822ea..14eb3ef2f2 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -12,6 +12,8 @@ // ZettaScale Zenoh Team, // +pub(crate) type Id = u32; + pub(crate) mod admin; pub(crate) mod builders; pub(crate) mod encoding; diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index dac046324d..77d2d66f46 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -17,7 +17,7 @@ //! see [`Liveliness`] use zenoh_protocol::network::request; -use crate::{api::query::Reply, Id}; +use crate::{api::query::Reply, api::Id}; #[zenoh_macros::unstable] use { diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index a065685fa1..39ca9d9f9c 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -27,7 +27,7 @@ use crate::prelude::*; #[cfg(feature = "unstable")] use crate::{ api::handlers::{Callback, DefaultHandler, IntoHandler}, - Id, + api::Id, }; use futures::Sink; use std::convert::TryFrom; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index df76b6441f..479f9aee19 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -22,7 +22,7 @@ use crate::api::session::SessionRef; use crate::api::session::Undeclarable; use crate::net::primitives::Primitives; use crate::prelude::*; -use crate::Id; +use crate::api::Id; #[cfg(feature = "unstable")] use crate::{api::query::ReplyKeyExpr, api::sample::Attachment}; use std::fmt; diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 61ac272039..3720ea129f 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -49,7 +49,7 @@ use crate::prelude::Locality; use crate::publication::*; use crate::query::*; use crate::queryable::*; -use crate::Id; +use crate::api::Id; use crate::Priority; use crate::Sample; use crate::SampleKind; diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index c549542b3b..560c37a371 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -18,7 +18,7 @@ use crate::api::key_expr::KeyExpr; use crate::api::sample::Sample; use crate::api::session::Undeclarable; use crate::prelude::Locality; -use crate::Id; +use crate::api::Id; use crate::{api::session::SessionRef, Result as ZResult}; use std::fmt; use std::future::Ready; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index e2cb570a37..325fa894fb 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -82,8 +82,6 @@ extern crate zenoh_result; mod api; mod net; -pub(crate) type Id = u32; - use git_version::git_version; #[cfg(feature = "unstable")] use prelude::*; From 35378b32ba33757608844a65beb8a765600d7835 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 5 Apr 2024 18:18:31 +0200 Subject: [PATCH 135/357] keyexpr disabled in prelude --- Cargo.lock | 1 - examples/examples/z_delete.rs | 1 + examples/examples/z_forward.rs | 1 + examples/examples/z_ping.rs | 1 + examples/examples/z_pong.rs | 1 + examples/examples/z_pub.rs | 1 + examples/examples/z_pull.rs | 2 +- examples/examples/z_put.rs | 
1 + examples/examples/z_put_float.rs | 1 + examples/examples/z_queryable.rs | 1 + examples/examples/z_storage.rs | 1 + examples/examples/z_sub.rs | 1 + plugins/zenoh-backend-example/src/lib.rs | 2 +- plugins/zenoh-backend-traits/src/config.rs | 2 +- plugins/zenoh-backend-traits/src/lib.rs | 6 +++--- plugins/zenoh-plugin-example/src/lib.rs | 1 + .../zenoh-plugin-rest/examples/z_serve_sse.rs | 1 + plugins/zenoh-plugin-rest/src/lib.rs | 1 + .../zenoh-plugin-storage-manager/Cargo.toml | 1 - .../zenoh-plugin-storage-manager/src/lib.rs | 1 + .../src/memory_backend/mod.rs | 1 + .../src/replica/align_queryable.rs | 1 + .../src/replica/aligner.rs | 1 + .../src/replica/mod.rs | 1 + .../src/replica/storage.rs | 9 +++++---- zenoh-ext/src/group.rs | 3 +++ zenoh-ext/src/publication_cache.rs | 1 + zenoh-ext/src/querying_subscriber.rs | 1 + zenoh-ext/src/session_ext.rs | 6 ++++-- zenoh/src/api/admin.rs | 5 +++-- zenoh/src/api/builders/sample.rs | 2 +- zenoh/src/api/key_expr.rs | 3 +-- zenoh/src/api/liveliness.rs | 8 ++++---- zenoh/src/api/publication.rs | 2 ++ zenoh/src/api/query.rs | 2 ++ zenoh/src/api/queryable.rs | 3 ++- zenoh/src/api/session.rs | 2 +- zenoh/src/api/subscriber.rs | 2 +- zenoh/src/lib.rs | 19 +++++++++++++------ zenoh/src/net/routing/dispatcher/face.rs | 2 +- zenoh/src/net/routing/interceptor/mod.rs | 2 +- zenoh/src/prelude.rs | 2 +- zenoh/tests/session.rs | 1 + zenoh/tests/unicity.rs | 1 + 44 files changed, 73 insertions(+), 35 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9dff82ad80..75a045d9b3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4963,7 +4963,6 @@ dependencies = [ "zenoh", "zenoh-collections", "zenoh-core", - "zenoh-keyexpr", "zenoh-plugin-trait", "zenoh-result", "zenoh-util", diff --git a/examples/examples/z_delete.rs b/examples/examples/z_delete.rs index a090458c71..7f48f90c96 100644 --- a/examples/examples/z_delete.rs +++ b/examples/examples/z_delete.rs @@ -13,6 +13,7 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_forward.rs b/examples/examples/z_forward.rs index 486ccc4fdb..349690c8a8 100644 --- a/examples/examples/z_forward.rs +++ b/examples/examples/z_forward.rs @@ -13,6 +13,7 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; use zenoh_ext::SubscriberForward; diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index a57c937e48..a989b34482 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -14,6 +14,7 @@ use clap::Parser; use std::time::{Duration, Instant}; use zenoh::config::Config; +use zenoh::key_expr::keyexpr; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index baa5683f62..60f6db0b68 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -13,6 +13,7 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::key_expr::keyexpr; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 8cd3c4edba..176e991fff 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -14,6 +14,7 @@ use clap::Parser; use std::time::Duration; use zenoh::config::Config; +use zenoh::key_expr::KeyExpr; use 
zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index 4e44930f4f..3d4ff30e2b 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -13,7 +13,7 @@ // use clap::Parser; use std::time::Duration; -use zenoh::{config::Config, handlers::RingBuffer, prelude::r#async::*}; +use zenoh::{config::Config, handlers::RingBuffer, key_expr::KeyExpr, prelude::r#async::*}; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_put.rs b/examples/examples/z_put.rs index 7b38490507..b6039d09ba 100644 --- a/examples/examples/z_put.rs +++ b/examples/examples/z_put.rs @@ -13,6 +13,7 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_put_float.rs b/examples/examples/z_put_float.rs index 33482e4680..86f1ce3c08 100644 --- a/examples/examples/z_put_float.rs +++ b/examples/examples/z_put_float.rs @@ -13,6 +13,7 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index 83ac63ce1f..025f3cc1cc 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -13,6 +13,7 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index cb2f40c125..50d84001a8 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -17,6 +17,7 @@ use clap::Parser; use futures::select; use std::collections::HashMap; use zenoh::config::Config; +use zenoh::key_expr::{keyexpr, KeyExpr}; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index fbce562c2e..f7e232f240 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -13,6 +13,7 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/plugins/zenoh-backend-example/src/lib.rs b/plugins/zenoh-backend-example/src/lib.rs index f81231a498..3663f3249e 100644 --- a/plugins/zenoh-backend-example/src/lib.rs +++ b/plugins/zenoh-backend-example/src/lib.rs @@ -14,7 +14,7 @@ use async_std::sync::RwLock; use async_trait::async_trait; use std::collections::{hash_map::Entry, HashMap}; -use zenoh::{prelude::OwnedKeyExpr, time::Timestamp, value::Value}; +use zenoh::{key_expr::OwnedKeyExpr, time::Timestamp, value::Value}; use zenoh_backend_traits::{ config::{StorageConfig, VolumeConfig}, Capability, History, Persistence, Storage, StorageInsertionResult, StoredData, Volume, diff --git a/plugins/zenoh-backend-traits/src/config.rs b/plugins/zenoh-backend-traits/src/config.rs index 5ab59ebe45..cfbc1566c8 100644 --- a/plugins/zenoh-backend-traits/src/config.rs +++ b/plugins/zenoh-backend-traits/src/config.rs @@ -17,7 +17,7 @@ use schemars::JsonSchema; use serde_json::{Map, Value}; use std::convert::TryFrom; use std::time::Duration; -use zenoh::{prelude::keyexpr, prelude::OwnedKeyExpr, Result as ZResult}; +use zenoh::{key_expr::keyexpr, key_expr::OwnedKeyExpr, Result as ZResult}; use zenoh_plugin_trait::{PluginStartArgs, StructVersion}; use zenoh_result::{bail, zerror, Error}; diff 
--git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index 40d022f1ec..4340c454fa 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -125,10 +125,10 @@ use async_trait::async_trait; use const_format::concatcp; -use zenoh::prelude::OwnedKeyExpr; +use zenoh::key_expr::{keyexpr, OwnedKeyExpr}; use zenoh::time::Timestamp; use zenoh::value::Value; -pub use zenoh::Result as ZResult; +use zenoh::Result as ZResult; use zenoh_plugin_trait::{PluginControl, PluginInstance, PluginStatusRec, StructVersion}; use zenoh_util::concat_enabled_features; @@ -212,7 +212,7 @@ impl StructVersion for VolumeInstance { } impl PluginControl for VolumeInstance { - fn plugins_status(&self, _names: &zenoh::prelude::keyexpr) -> Vec { + fn plugins_status(&self, _names: &keyexpr) -> Vec { Vec::new() } } diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 5615ce68af..9d25f582fb 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -22,6 +22,7 @@ use std::sync::{ atomic::{AtomicBool, Ordering::Relaxed}, Arc, Mutex, }; +use zenoh::key_expr::{keyexpr, KeyExpr}; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::runtime::Runtime; diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index 85d730bb41..6a278c4784 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -13,6 +13,7 @@ // use clap::{arg, Command}; use std::time::Duration; +use zenoh::key_expr::keyexpr; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 49c58f5074..e2434c644c 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -29,6 +29,7 @@ use std::sync::Arc; use tide::http::Mime; use tide::sse::Sender; use tide::{Request, Response, Server, StatusCode}; +use zenoh::key_expr::{keyexpr, KeyExpr}; use zenoh::payload::StringOrBase64; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; diff --git a/plugins/zenoh-plugin-storage-manager/Cargo.toml b/plugins/zenoh-plugin-storage-manager/Cargo.toml index 65b15686f7..fe9359f696 100644 --- a/plugins/zenoh-plugin-storage-manager/Cargo.toml +++ b/plugins/zenoh-plugin-storage-manager/Cargo.toml @@ -49,7 +49,6 @@ urlencoding = { workspace = true } zenoh = { workspace = true, features = ["unstable"] } zenoh-collections = { workspace = true } zenoh-core = { workspace = true } -zenoh-keyexpr = { workspace = true } zenoh-plugin-trait = { workspace = true } zenoh-result = { workspace = true } zenoh-util = { workspace = true } diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 78a9814179..7d679ef37d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -27,6 +27,7 @@ use std::convert::TryFrom; use std::sync::Arc; use std::sync::Mutex; use storages_mgt::StorageMessage; +use zenoh::key_expr::keyexpr; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::sync::*; use zenoh::runtime::Runtime; diff --git a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs index 
4e333b8592..d9f330ea8c 100644 --- a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs @@ -15,6 +15,7 @@ use async_std::sync::RwLock; use async_trait::async_trait; use std::collections::HashMap; use std::sync::Arc; +use zenoh::key_expr::OwnedKeyExpr; use zenoh::prelude::r#async::*; use zenoh::time::Timestamp; use zenoh_backend_traits::config::{StorageConfig, VolumeConfig}; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 5908778867..d73b9b2b6d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -18,6 +18,7 @@ use std::cmp::Ordering; use std::collections::{BTreeSet, HashMap, HashSet}; use std::str; use std::str::FromStr; +use zenoh::key_expr::OwnedKeyExpr; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::session::Session; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 23bf066263..0553710851 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -18,6 +18,7 @@ use async_std::sync::{Arc, RwLock}; use flume::{Receiver, Sender}; use std::collections::{HashMap, HashSet}; use std::str; +use zenoh::key_expr::{KeyExpr, OwnedKeyExpr}; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::sample::SampleBuilder; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index 77741f43fc..5289fc47af 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -26,6 +26,7 @@ use std::str; use std::str::FromStr; use std::time::{Duration, SystemTime}; use urlencoding::encode; +use zenoh::key_expr::OwnedKeyExpr; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::session::Session; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 646aebc837..007b21083b 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -23,6 +23,11 @@ use std::str::{self, FromStr}; use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::SplitBuffer; use zenoh::buffers::ZBuf; +use zenoh::key_expr::keyexpr_tree::KeyedSetProvider; +use zenoh::key_expr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut}; +use zenoh::key_expr::keyexpr_tree::{KeBoxTree, NonWild, UnknownWildness}; +use zenoh::key_expr::KeyExpr; +use zenoh::key_expr::OwnedKeyExpr; use zenoh::prelude::r#async::*; use zenoh::query::{ConsolidationMode, QueryTarget}; use zenoh::sample::SampleBuilder; @@ -32,10 +37,6 @@ use zenoh::value::Value; use zenoh::{session::Session, Result as ZResult}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; -use zenoh_keyexpr::key_expr::OwnedKeyExpr; -use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; -use zenoh_keyexpr::keyexpr_tree::{support::NonWild, support::UnknownWildness, KeBoxTree}; -use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut}; 
use zenoh_result::bail; use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer}; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index aa03571f6f..7ede485784 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -25,6 +25,9 @@ use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::sync::Mutex; use tokio::task::JoinHandle; +use zenoh::key_expr::keyexpr; +use zenoh::key_expr::KeyExpr; +use zenoh::key_expr::OwnedKeyExpr; use zenoh::payload::PayloadReader; use zenoh::prelude::r#async::*; use zenoh::publication::Publisher; diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index b8b7c79cec..41766fa1fa 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -15,6 +15,7 @@ use flume::{bounded, Sender}; use std::collections::{HashMap, VecDeque}; use std::convert::TryInto; use std::future::Ready; +use zenoh::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; use zenoh::prelude::r#async::*; use zenoh::queryable::{Query, Queryable}; use zenoh::session::SessionRef; diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 24501f9eca..6ad417f774 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -18,6 +18,7 @@ use std::mem::swap; use std::sync::{Arc, Mutex}; use std::time::Duration; use zenoh::handlers::{locked, DefaultHandler}; +use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; use zenoh::sample::SampleBuilder; diff --git a/zenoh-ext/src/session_ext.rs b/zenoh-ext/src/session_ext.rs index 2c9826c98b..2dd0fbd873 100644 --- a/zenoh-ext/src/session_ext.rs +++ b/zenoh-ext/src/session_ext.rs @@ -14,8 +14,10 @@ use super::PublicationCacheBuilder; use std::convert::TryInto; use std::sync::Arc; -use zenoh::prelude::KeyExpr; -use zenoh::session::{Session, SessionRef}; +use zenoh::{ + key_expr::KeyExpr, + session::{Session, SessionRef}, +}; /// Some extensions to the [`zenoh::Session`](zenoh::Session) pub trait SessionExt<'s, 'a> { diff --git a/zenoh/src/api/admin.rs b/zenoh/src/api/admin.rs index 678f6d1bbb..1a5d52fb4c 100644 --- a/zenoh/src/api/admin.rs +++ b/zenoh/src/api/admin.rs @@ -13,10 +13,10 @@ // use crate::{ api::encoding::Encoding, + api::key_expr::KeyExpr, api::queryable::Query, api::sample::DataInfo, - keyexpr, - prelude::sync::{KeyExpr, Locality, SampleKind}, + prelude::sync::{Locality, SampleKind}, Payload, Session, }; use std::{ @@ -25,6 +25,7 @@ use std::{ sync::Arc, }; use zenoh_core::{Result as ZResult, SyncResolve}; +use zenoh_keyexpr::keyexpr; use zenoh_protocol::{core::WireExpr, network::NetworkMessage}; use zenoh_transport::{ TransportEventHandler, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs index 87b2b928ff..abf8e2446a 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -14,6 +14,7 @@ use std::marker::PhantomData; +use crate::api::key_expr::KeyExpr; #[cfg(feature = "unstable")] use crate::api::sample::Attachment; use crate::api::sample::QoS; @@ -22,7 +23,6 @@ use crate::api::sample::QoSBuilder; use crate::api::sample::SourceInfo; use crate::api::value::Value; use crate::Encoding; -use crate::KeyExpr; use crate::Payload; use crate::Priority; use crate::Sample; diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index b6148ded41..f3a5b90a98 100644 --- 
a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -49,8 +49,7 @@ use std::{ str::FromStr, }; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; -pub use zenoh_keyexpr::*; -pub use zenoh_macros::{kedefine, keformat, kewrite}; +use zenoh_keyexpr::{keyexpr, OwnedKeyExpr}; use zenoh_protocol::{ core::{key_expr::canon::Canonizable, ExprId, WireExpr}, network::{declare, DeclareBody, Mapping, UndeclareKeyExpr}, diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 77d2d66f46..7fc830be3d 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -15,20 +15,18 @@ //! Liveliness primitives. //! //! see [`Liveliness`] -use zenoh_protocol::network::request; - -use crate::{api::query::Reply, api::Id}; - #[zenoh_macros::unstable] use { crate::{ api::handlers::locked, api::handlers::DefaultHandler, + api::key_expr::KeyExpr, api::session::SessionRef, api::session::Undeclarable, api::subscriber::{Subscriber, SubscriberInner}, prelude::*, }, + crate::{api::query::Reply, api::Id}, std::convert::TryInto, std::future::Ready, std::sync::Arc, @@ -38,7 +36,9 @@ use { zenoh_core::Resolvable, zenoh_core::Result as ZResult, zenoh_core::SyncResolve, + zenoh_keyexpr::keyexpr, zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo, + zenoh_protocol::network::request, }; #[zenoh_macros::unstable] diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 39ca9d9f9c..98064a1b99 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -17,6 +17,7 @@ use crate::api::builders::publication::{ PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, PublisherDeleteBuilder, PublisherPutBuilder, }; +use crate::api::key_expr::KeyExpr; #[zenoh_macros::unstable] use crate::api::sample::Attachment; use crate::api::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; @@ -35,6 +36,7 @@ use std::future::Ready; use std::pin::Pin; use std::task::{Context, Poll}; use zenoh_core::{zread, AsyncResolve, Resolvable, Resolve, SyncResolve}; +use zenoh_keyexpr::keyexpr; pub use zenoh_protocol::core::CongestionControl; use zenoh_protocol::network::push::ext; use zenoh_protocol::network::Push; diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 1e749132da..ed9bff7776 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -14,6 +14,7 @@ //! Query primitives. use crate::api::handlers::{locked, Callback, DefaultHandler}; +use crate::api::key_expr::KeyExpr; #[zenoh_macros::unstable] use crate::api::sample::Attachment; use crate::api::sample::QoSBuilder; @@ -23,6 +24,7 @@ use std::collections::HashMap; use std::future::Ready; use std::time::Duration; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; +use zenoh_keyexpr::OwnedKeyExpr; use zenoh_result::ZResult; /// The [`Queryable`](crate::queryable::Queryable)s that should be target of a [`get`](Session::get). diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 479f9aee19..e46dab3c49 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -14,15 +14,16 @@ //! Queryable primitives. 
+use super::key_expr::KeyExpr; use crate::api::builders::sample::SampleBuilder; use crate::api::encoding::Encoding; use crate::api::handlers::{locked, DefaultHandler}; use crate::api::sample::{QoSBuilder, SourceInfo}; use crate::api::session::SessionRef; use crate::api::session::Undeclarable; +use crate::api::Id; use crate::net::primitives::Primitives; use crate::prelude::*; -use crate::api::Id; #[cfg(feature = "unstable")] use crate::{api::query::ReplyKeyExpr, api::sample::Attachment}; use std::fmt; diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 3720ea129f..59fdcf78e7 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -41,6 +41,7 @@ use crate::api::selector::TIME_RANGE_KEY; use crate::api::subscriber::SubscriberBuilder; use crate::api::subscriber::SubscriberState; use crate::api::value::Value; +use crate::api::Id; use crate::net::primitives::Primitives; use crate::net::routing::dispatcher::face::Face; use crate::net::runtime::Runtime; @@ -49,7 +50,6 @@ use crate::prelude::Locality; use crate::publication::*; use crate::query::*; use crate::queryable::*; -use crate::api::Id; use crate::Priority; use crate::Sample; use crate::SampleKind; diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index 560c37a371..0c1303e638 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -17,8 +17,8 @@ use crate::api::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::api::key_expr::KeyExpr; use crate::api::sample::Sample; use crate::api::session::Undeclarable; -use crate::prelude::Locality; use crate::api::Id; +use crate::prelude::Locality; use crate::{api::session::SessionRef, Result as ZResult}; use std::fmt; use std::future::Ready; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 325fa894fb..055be82543 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -126,15 +126,22 @@ pub mod buffers { } pub mod key_expr { - pub use crate::api::key_expr::kedefine; - pub use crate::api::key_expr::keformat; - pub use crate::api::key_expr::keyexpr; - pub use crate::api::key_expr::OwnedKeyExpr; + pub mod keyexpr_tree { + pub use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; + pub use zenoh_keyexpr::keyexpr_tree::{ + support::NonWild, support::UnknownWildness, KeBoxTree, + }; + pub use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut}; + } + pub use crate::api::key_expr::KeyExpr; + pub use zenoh_keyexpr::keyexpr; + pub use zenoh_keyexpr::OwnedKeyExpr; + pub use zenoh_macros::{kedefine, keformat, kewrite}; // keyexpr format macro support pub mod format { - pub use crate::api::key_expr::format::*; + pub use zenoh_keyexpr::format::*; pub mod macro_support { - pub use crate::api::key_expr::format::macro_support::*; + pub use zenoh_keyexpr::format::macro_support::*; } } } diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index cb565053c9..7ac6e10995 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -14,9 +14,9 @@ use super::super::router::*; use super::tables::TablesLock; use super::{resource::*, tables}; +use crate::api::key_expr::KeyExpr; use crate::net::primitives::{McastMux, Mux, Primitives}; use crate::net::routing::interceptor::{InterceptorTrait, InterceptorsChain}; -use crate::KeyExpr; use std::any::Any; use std::collections::HashMap; use std::fmt; diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index 9dfc03ac7e..23d90c30c7 100644 --- 
a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -18,7 +18,7 @@ //! //! [Click here for Zenoh's documentation](../zenoh/index.html) use super::RoutingContext; -use crate::KeyExpr; +use crate::api::key_expr::KeyExpr; use std::any::Any; use zenoh_config::Config; use zenoh_protocol::network::NetworkMessage; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index bffd9280b5..6e27fc3d6d 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -24,7 +24,7 @@ pub use common::*; pub(crate) mod common { - pub use crate::api::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; + // pub use crate::api::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; pub use zenoh_buffers::{ buffer::{Buffer, SplitBuffer}, reader::HasReader, diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 8c2d2e9937..35f3dac2c6 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -14,6 +14,7 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; +use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_core::ztimeout; diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index f34704fb7e..0033ed6468 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -15,6 +15,7 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; use tokio::runtime::Handle; +use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_core::ztimeout; From 142ca2fef862d0650b2279b6436b3e7a49e58842 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 5 Apr 2024 18:22:54 +0200 Subject: [PATCH 136/357] resolve disabled in prelude --- zenoh-ext/src/publication_cache.rs | 2 +- zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh/src/api/liveliness.rs | 1 + zenoh/src/api/queryable.rs | 2 +- zenoh/src/prelude.rs | 12 ++++++------ 5 files changed, 10 insertions(+), 9 deletions(-) diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 41766fa1fa..e1a974b6ff 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -20,7 +20,7 @@ use zenoh::prelude::r#async::*; use zenoh::queryable::{Query, Queryable}; use zenoh::session::SessionRef; use zenoh::subscriber::FlumeSubscriber; -use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; +use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; use zenoh_result::{bail, ZResult}; use zenoh_util::core::ResolveFuture; diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 6ad417f774..bdcab8f220 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -26,7 +26,7 @@ use zenoh::session::SessionRef; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::{new_reception_timestamp, Timestamp}; use zenoh::Result as ZResult; -use zenoh_core::{zlock, AsyncResolve, Resolvable, SyncResolve}; +use zenoh_core::{zlock, AsyncResolve, Resolvable, Resolve, SyncResolve}; use crate::ExtractSample; diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 7fc830be3d..51ff59e5cc 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -40,6 +40,7 @@ use { zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo, zenoh_protocol::network::request, }; +use {zenoh_core::Resolve}; #[zenoh_macros::unstable] pub(crate) static PREFIX_LIVELINESS: &str = crate::net::routing::PREFIX_LIVELINESS; diff --git a/zenoh/src/api/queryable.rs 
b/zenoh/src/api/queryable.rs index e46dab3c49..37afb900aa 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -31,7 +31,7 @@ use std::future::Ready; use std::ops::Deref; use std::sync::Arc; use uhlc::Timestamp; -use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; +use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; use zenoh_protocol::{ core::{EntityId, WireExpr}, network::{response, Mapping, RequestId, Response, ResponseFinal}, diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 6e27fc3d6d..d26c146d4c 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -25,12 +25,12 @@ pub use common::*; pub(crate) mod common { // pub use crate::api::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; - pub use zenoh_buffers::{ - buffer::{Buffer, SplitBuffer}, - reader::HasReader, - writer::HasWriter, - }; - pub use zenoh_core::Resolve; + // pub use zenoh_buffers::{ + // buffer::{Buffer, SplitBuffer}, + // reader::HasReader, + // writer::HasWriter, + // }; + // pub use zenoh_core::Resolve; pub use zenoh_protocol::core::{EndPoint, Locator, ZenohId}; #[zenoh_macros::unstable] From 62d8e7854a9ff9343c2fdb0ecb633e2e937d6d21 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sat, 6 Apr 2024 15:41:54 +0200 Subject: [PATCH 137/357] allowed build zenoh without unstable feature set --- zenoh/src/publication.rs | 1 + zenoh/src/query.rs | 2 ++ zenoh/src/queryable.rs | 12 ++++++++++-- zenoh/src/sample/builder.rs | 1 + zenoh/src/sample/mod.rs | 8 ++++---- zenoh/src/session.rs | 6 +++++- zenoh/src/subscriber.rs | 3 --- 7 files changed, 23 insertions(+), 10 deletions(-) diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index c176ad32e0..4f31c73a24 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -157,6 +157,7 @@ impl

ValueBuilderTrait for PublicationBuilder { } } +#[zenoh_macros::unstable] impl SampleBuilderTrait for PublicationBuilder { #[cfg(feature = "unstable")] fn source_info(self, source_info: SourceInfo) -> Self { diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index cb1116130d..3a380bd1c9 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -133,6 +133,7 @@ pub struct GetBuilder<'a, 'b, Handler> { pub(crate) source_info: SourceInfo, } +#[zenoh_macros::unstable] impl SampleBuilderTrait for GetBuilder<'_, '_, Handler> { #[cfg(feature = "unstable")] fn source_info(self, source_info: SourceInfo) -> Self { @@ -430,6 +431,7 @@ where self.value, #[cfg(feature = "unstable")] self.attachment, + #[cfg(feature = "unstable")] self.source_info, callback, ) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 6fbb4e9090..0ad3a36c07 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -18,12 +18,15 @@ use crate::encoding::Encoding; use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; -use crate::sample::{QoSBuilder, SourceInfo}; +use crate::sample::builder::SampleBuilder; +use crate::sample::QoSBuilder; +#[cfg(feature = "unstable")] +use crate::sample::SourceInfo; use crate::Id; use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] -use crate::{query::ReplyKeyExpr, sample::builder::SampleBuilder, sample::Attachment}; +use crate::{query::ReplyKeyExpr, sample::Attachment}; use std::fmt; use std::future::Ready; use std::ops::Deref; @@ -155,7 +158,9 @@ impl Query { encoding: Encoding::default(), }, timestamp: None, + #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] attachment: None, } } @@ -193,7 +198,9 @@ impl Query { qos: response::ext::QoSType::RESPONSE.into(), kind: ReplyBuilderDelete, timestamp: None, + #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] attachment: None, } } @@ -298,6 +305,7 @@ impl TimestampBuilderTrait for ReplyBuilder<'_, '_, T> { } } +#[cfg(feature = "unstable")] impl SampleBuilderTrait for ReplyBuilder<'_, '_, T> { #[cfg(feature = "unstable")] fn attachment>>(self, attachment: U) -> Self { diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index fca55edd09..bad35024ef 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -163,6 +163,7 @@ impl TimestampBuilderTrait for SampleBuilder { } } +#[cfg(feature = "unstable")] impl SampleBuilderTrait for SampleBuilder { #[zenoh_macros::unstable] fn source_info(self, source_info: SourceInfo) -> Self { diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/sample/mod.rs index 6e457578a3..0ef8462d2a 100644 --- a/zenoh/src/sample/mod.rs +++ b/zenoh/src/sample/mod.rs @@ -22,9 +22,9 @@ use crate::Priority; #[zenoh_macros::unstable] use serde::Serialize; use std::{convert::TryFrom, fmt}; +use zenoh_protocol::core::CongestionControl; use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::network::declare::ext::QoSType; -use zenoh_protocol::{core::CongestionControl, zenoh}; pub mod builder; @@ -178,12 +178,12 @@ impl SourceInfo { } #[zenoh_macros::unstable] -impl From for Option { - fn from(source_info: SourceInfo) -> Option { +impl From for Option { + fn from(source_info: SourceInfo) -> Option { if source_info.is_empty() { None } else { - Some(zenoh::put::ext::SourceInfoType { + Some(zenoh_protocol::zenoh::put::ext::SourceInfoType { id: source_info.source_id.unwrap_or_default(), sn: 
source_info.source_sn.unwrap_or_default() as u32, }) diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index f694eb6420..181976dcb0 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -61,6 +61,8 @@ use zenoh_config::unwrap_or_default; use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; #[cfg(feature = "unstable")] use zenoh_protocol::network::declare::SubscriberId; +#[cfg(feature = "unstable")] +use zenoh_protocol::network::ext; use zenoh_protocol::network::AtomicRequestId; use zenoh_protocol::network::RequestId; use zenoh_protocol::zenoh::reply::ReplyBody; @@ -77,7 +79,6 @@ use zenoh_protocol::{ subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, DeclareMode, DeclareQueryable, DeclareSubscriber, UndeclareQueryable, UndeclareSubscriber, }, - ext, request::{self, ext::TargetType, Request}, Mapping, Push, Response, ResponseFinal, }, @@ -1687,7 +1688,10 @@ impl Session { payload: RequestBody::Query(zenoh_protocol::zenoh::Query { consolidation, parameters: selector.parameters().to_string(), + #[cfg(feature = "unstable")] ext_sinfo: source.into(), + #[cfg(not(feature = "unstable"))] + ext_sinfo: None, ext_body: value.as_ref().map(|v| query::ext::QueryBodyType { #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index 60a31a6577..47d41ebb1f 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -202,9 +202,6 @@ pub struct SubscriberBuilder<'a, 'b, Handler> { #[cfg(not(feature = "unstable"))] pub(crate) reliability: Reliability, - #[cfg(not(feature = "unstable"))] - pub(crate) mode: Mode, - #[cfg(feature = "unstable")] pub origin: Locality, #[cfg(not(feature = "unstable"))] From e1cb5df7451aceaaecad7e5cd4ec5454aef03ed0 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sat, 6 Apr 2024 16:15:48 +0200 Subject: [PATCH 138/357] unfinished - priority resolving problem --- zenoh/src/api.rs | 2 +- zenoh/src/api/admin.rs | 9 +++------ zenoh/src/api/builders/publication.rs | 7 +++---- zenoh/src/api/builders/sample.rs | 10 +++++----- zenoh/src/api/key_expr.rs | 7 +++++-- zenoh/src/api/liveliness.rs | 2 +- zenoh/src/api/query.rs | 3 ++- zenoh/src/api/queryable.rs | 10 +++++++++- zenoh/src/api/sample.rs | 2 +- zenoh/src/api/session.rs | 15 +++++++++------ zenoh/src/api/subscriber.rs | 2 +- zenoh/src/lib.rs | 6 ++++++ zenoh/src/prelude.rs | 4 ++-- 13 files changed, 48 insertions(+), 31 deletions(-) diff --git a/zenoh/src/api.rs b/zenoh/src/api.rs index 14eb3ef2f2..ab38844ea6 100644 --- a/zenoh/src/api.rs +++ b/zenoh/src/api.rs @@ -20,9 +20,9 @@ pub(crate) mod encoding; pub(crate) mod handlers; pub(crate) mod info; pub(crate) mod key_expr; +#[cfg(feature = "unstable")] pub(crate) mod liveliness; pub(crate) mod payload; -#[cfg(feature = "unstable")] pub(crate) mod plugins; pub(crate) mod publication; pub(crate) mod query; diff --git a/zenoh/src/api/admin.rs b/zenoh/src/api/admin.rs index 1a5d52fb4c..917afdc18f 100644 --- a/zenoh/src/api/admin.rs +++ b/zenoh/src/api/admin.rs @@ -11,13 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // +use crate::api::sample::Locality; use crate::{ - api::encoding::Encoding, - api::key_expr::KeyExpr, - api::queryable::Query, - api::sample::DataInfo, - prelude::sync::{Locality, SampleKind}, - Payload, Session, + api::encoding::Encoding, api::key_expr::KeyExpr, api::payload::Payload, api::queryable::Query, + api::sample::DataInfo, api::sample::SampleKind, api::session::Session, }; use std::{ 
collections::hash_map::DefaultHasher, diff --git a/zenoh/src/api/builders/publication.rs b/zenoh/src/api/builders/publication.rs index 50a8c6ab42..cead1427f0 100644 --- a/zenoh/src/api/builders/publication.rs +++ b/zenoh/src/api/builders/publication.rs @@ -16,16 +16,15 @@ use std::future::Ready; use crate::api::builders::sample::SampleBuilderTrait; use crate::api::builders::sample::{QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait}; use crate::api::key_expr::KeyExpr; -use crate::api::publication::Priority; +#[cfg(feature = "unstable")] +use crate::api::sample::Attachment; use crate::api::sample::Locality; use crate::api::sample::SampleKind; #[cfg(feature = "unstable")] use crate::api::sample::SourceInfo; use crate::api::session::SessionRef; use crate::api::value::Value; -use crate::api::{ - encoding::Encoding, payload::Payload, publication::Publisher, sample::Attachment, -}; +use crate::api::{encoding::Encoding, payload::Payload, publication::Publisher}; use zenoh_core::{AsyncResolve, Resolvable, Result as ZResult, SyncResolve}; use zenoh_protocol::core::CongestionControl; use zenoh_protocol::network::Mapping; diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs index c6ec22d8a3..e89a13606b 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -14,20 +14,20 @@ use std::marker::PhantomData; +use crate::api::encoding::Encoding; use crate::api::key_expr::KeyExpr; +use crate::api::payload::Payload; #[cfg(feature = "unstable")] use crate::api::sample::Attachment; use crate::api::sample::QoS; use crate::api::sample::QoSBuilder; +use crate::api::sample::Sample; +use crate::api::sample::SampleKind; #[cfg(feature = "unstable")] use crate::api::sample::SourceInfo; use crate::api::value::Value; -use crate::Encoding; -use crate::Payload; -use crate::Priority; -use crate::Sample; -use crate::SampleKind; use uhlc::Timestamp; +use zenoh_config::Priority; use zenoh_core::zresult; use zenoh_protocol::core::CongestionControl; diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index a2edc8085c..bed9fd5b95 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -56,7 +56,10 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; -use crate::{api::session::Undeclarable, net::primitives::Primitives, prelude::Selector, Session}; +use crate::{ + api::session::Session, api::session::Undeclarable, net::primitives::Primitives, + prelude::Selector, +}; #[derive(Clone, Debug)] pub(crate) enum KeyExprInner<'a> { @@ -552,7 +555,7 @@ impl<'a> KeyExpr<'a> { _ => false, } } - pub(crate) fn to_wire(&'a self, session: &crate::Session) -> WireExpr<'a> { + pub(crate) fn to_wire(&'a self, session: &Session) -> WireExpr<'a> { match &self.0 { KeyExprInner::Wire { key_expr, diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 51ff59e5cc..1f7d03ddca 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -15,6 +15,7 @@ //! Liveliness primitives. //! //! 
see [`Liveliness`] +use zenoh_core::Resolve; #[zenoh_macros::unstable] use { crate::{ @@ -40,7 +41,6 @@ use { zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo, zenoh_protocol::network::request, }; -use {zenoh_core::Resolve}; #[zenoh_macros::unstable] pub(crate) static PREFIX_LIVELINESS: &str = crate::net::routing::PREFIX_LIVELINESS; diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index daefe6a59e..d5314bb204 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -18,11 +18,12 @@ use crate::api::key_expr::KeyExpr; #[zenoh_macros::unstable] use crate::api::sample::Attachment; use crate::api::sample::QoSBuilder; +use crate::api::session::Session; use crate::prelude::*; -use crate::Session; use std::collections::HashMap; use std::future::Ready; use std::time::Duration; +use zenoh_config::Priority; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_keyexpr::OwnedKeyExpr; use zenoh_result::ZResult; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 37afb900aa..f8f58e5e66 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -18,7 +18,9 @@ use super::key_expr::KeyExpr; use crate::api::builders::sample::SampleBuilder; use crate::api::encoding::Encoding; use crate::api::handlers::{locked, DefaultHandler}; -use crate::api::sample::{QoSBuilder, SourceInfo}; +use crate::api::sample::QoSBuilder; +#[cfg(feature = "unstable")] +use crate::api::sample::SourceInfo; use crate::api::session::SessionRef; use crate::api::session::Undeclarable; use crate::api::Id; @@ -31,6 +33,7 @@ use std::future::Ready; use std::ops::Deref; use std::sync::Arc; use uhlc::Timestamp; +use zenoh_config::Priority; use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; use zenoh_protocol::{ core::{EntityId, WireExpr}, @@ -145,7 +148,9 @@ impl Query { encoding: Encoding::default(), }, timestamp: None, + #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] attachment: None, } } @@ -183,7 +188,9 @@ impl Query { qos: response::ext::QoSType::RESPONSE.into(), kind: ReplyBuilderDelete, timestamp: None, + #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] attachment: None, } } @@ -284,6 +291,7 @@ impl TimestampBuilderTrait for ReplyBuilder<'_, '_, T> { } } +#[cfg(feature = "unstable")] impl SampleBuilderTrait for ReplyBuilder<'_, '_, T> { #[cfg(feature = "unstable")] fn attachment>>(self, attachment: U) -> Self { diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 121780006c..8475e11ddf 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -18,12 +18,12 @@ use crate::api::encoding::Encoding; use crate::api::key_expr::KeyExpr; use crate::api::value::Value; use crate::payload::Payload; -use crate::Priority; #[zenoh_macros::unstable] pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; #[zenoh_macros::unstable] use serde::Serialize; use std::{convert::TryFrom, fmt}; +use zenoh_config::Priority; use zenoh_protocol::core::CongestionControl; use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::core::Timestamp; diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 54ea6a08d5..3e82bcd16e 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -22,7 +22,10 @@ use crate::api::key_expr::KeyExpr; use crate::api::key_expr::KeyExprInner; #[zenoh_macros::unstable] use crate::api::liveliness::{Liveliness, LivelinessTokenState}; +use crate::api::payload::Payload; 
+#[zenoh_macros::unstable] use crate::api::publication::MatchingListenerState; +#[zenoh_macros::unstable] use crate::api::publication::MatchingStatus; use crate::api::query::GetBuilder; use crate::api::query::QueryState; @@ -34,7 +37,12 @@ use crate::api::queryable::QueryableState; use crate::api::sample::Attachment; use crate::api::sample::DataInfo; use crate::api::sample::DataInfoIntoSample; +use crate::api::sample::Locality; use crate::api::sample::QoS; +use crate::api::sample::Sample; +use crate::api::sample::SampleKind; +#[cfg(feature = "unstable")] +use crate::api::sample::SourceInfo; use crate::api::selector::Parameters; use crate::api::selector::Selector; use crate::api::selector::TIME_RANGE_KEY; @@ -45,15 +53,9 @@ use crate::api::Id; use crate::net::primitives::Primitives; use crate::net::routing::dispatcher::face::Face; use crate::net::runtime::Runtime; -use crate::payload::Payload; -use crate::prelude::Locality; use crate::publication::*; use crate::query::*; use crate::queryable::*; -use crate::Priority; -use crate::Sample; -use crate::SampleKind; -use crate::SourceInfo; use log::{error, trace, warn}; use std::collections::HashMap; use std::convert::TryFrom; @@ -71,6 +73,7 @@ use zenoh_collections::SingleOrVec; use zenoh_config::unwrap_or_default; use zenoh_config::Config; use zenoh_config::Notifier; +use zenoh_config::Priority; use zenoh_core::Resolvable; use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; use zenoh_protocol::core::Reliability; diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index 735f582cf2..39e863b1fa 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -15,10 +15,10 @@ //! Subscribing primitives. use crate::api::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::api::key_expr::KeyExpr; +use crate::api::sample::Locality; use crate::api::sample::Sample; use crate::api::session::Undeclarable; use crate::api::Id; -use crate::prelude::Locality; use crate::{api::session::SessionRef, Result as ZResult}; use std::fmt; use std::future::Ready; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 055be82543..c83c3bb598 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -149,6 +149,7 @@ pub mod key_expr { pub mod session { pub use crate::api::builders::publication::SessionDeleteBuilder; pub use crate::api::builders::publication::SessionPutBuilder; + #[zenoh_macros::unstable] pub use crate::api::session::init; pub use crate::api::session::open; pub use crate::api::session::Session; @@ -162,7 +163,9 @@ pub mod sample { pub use crate::api::builders::sample::SampleBuilderTrait; pub use crate::api::builders::sample::TimestampBuilderTrait; pub use crate::api::builders::sample::ValueBuilderTrait; + #[zenoh_macros::unstable] pub use crate::api::sample::Attachment; + #[zenoh_macros::unstable] pub use crate::api::sample::Locality; pub use crate::api::sample::Sample; pub use crate::api::sample::SampleKind; @@ -212,7 +215,9 @@ pub mod publication { pub mod query { pub use crate::api::query::Mode; pub use crate::api::query::Reply; + #[zenoh_macros::unstable] pub use crate::api::query::ReplyKeyExpr; + #[zenoh_macros::unstable] pub use crate::api::query::REPLY_KEY_EXPR_ANY_SEL_PARAM; pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; } @@ -236,6 +241,7 @@ pub mod scouting { pub use crate::api::scouting::WhatAmI; } +#[cfg(feature = "unstable")] pub mod liveliness { pub use crate::api::liveliness::Liveliness; pub use 
crate::api::liveliness::LivelinessSubscriberBuilder; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index d26c146d4c..e8c7e8a3b4 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -50,11 +50,11 @@ pub(crate) mod common { #[zenoh_macros::unstable] pub use crate::api::sample::Locality; + #[cfg(not(feature = "unstable"))] + pub(crate) use crate::api::sample::Locality; #[zenoh_macros::unstable] pub use crate::api::sample::SourceInfo; pub use crate::api::sample::{Sample, SampleKind}; - #[cfg(not(feature = "unstable"))] - pub(crate) use crate::sample::Locality; pub use crate::api::publication::Priority; #[zenoh_macros::unstable] From 22ece82e0dff4f691bba8e9e235ad0b4007b07e6 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sat, 6 Apr 2024 16:23:11 +0200 Subject: [PATCH 139/357] priority import fix --- zenoh/src/api/builders/publication.rs | 1 + zenoh/src/api/builders/sample.rs | 2 +- zenoh/src/api/query.rs | 2 +- zenoh/src/api/queryable.rs | 2 +- zenoh/src/api/sample.rs | 2 +- zenoh/src/api/session.rs | 2 +- 6 files changed, 6 insertions(+), 5 deletions(-) diff --git a/zenoh/src/api/builders/publication.rs b/zenoh/src/api/builders/publication.rs index cead1427f0..20ceb086f9 100644 --- a/zenoh/src/api/builders/publication.rs +++ b/zenoh/src/api/builders/publication.rs @@ -13,6 +13,7 @@ use std::future::Ready; // Contributors: // ZettaScale Zenoh Team, // +use crate::api::publication::Priority; use crate::api::builders::sample::SampleBuilderTrait; use crate::api::builders::sample::{QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait}; use crate::api::key_expr::KeyExpr; diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs index e89a13606b..838d67010e 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -26,8 +26,8 @@ use crate::api::sample::SampleKind; #[cfg(feature = "unstable")] use crate::api::sample::SourceInfo; use crate::api::value::Value; +use crate::api::publication::Priority; use uhlc::Timestamp; -use zenoh_config::Priority; use zenoh_core::zresult; use zenoh_protocol::core::CongestionControl; diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index d5314bb204..59fd9cc9e8 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -23,7 +23,7 @@ use crate::prelude::*; use std::collections::HashMap; use std::future::Ready; use std::time::Duration; -use zenoh_config::Priority; +use crate::api::publication::Priority; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_keyexpr::OwnedKeyExpr; use zenoh_result::ZResult; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index f8f58e5e66..e1391968f0 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -33,7 +33,7 @@ use std::future::Ready; use std::ops::Deref; use std::sync::Arc; use uhlc::Timestamp; -use zenoh_config::Priority; +use crate::api::publication::Priority; use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; use zenoh_protocol::{ core::{EntityId, WireExpr}, diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 8475e11ddf..6c0a49646e 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -23,7 +23,7 @@ pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; #[zenoh_macros::unstable] use serde::Serialize; use std::{convert::TryFrom, fmt}; -use zenoh_config::Priority; +use crate::api::publication::Priority; use zenoh_protocol::core::CongestionControl; use zenoh_protocol::core::EntityGlobalId; use 
zenoh_protocol::core::Timestamp; diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 3e82bcd16e..9221233262 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -73,7 +73,7 @@ use zenoh_collections::SingleOrVec; use zenoh_config::unwrap_or_default; use zenoh_config::Config; use zenoh_config::Notifier; -use zenoh_config::Priority; +use crate::api::publication::Priority; use zenoh_core::Resolvable; use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; use zenoh_protocol::core::Reliability; From 84d42f4d0177e2a1690755345119a84ecec8a2f3 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sat, 6 Apr 2024 16:41:46 +0200 Subject: [PATCH 140/357] priority, endpoint fixes --- examples/examples/z_info.rs | 1 + zenoh/src/api/builders/publication.rs | 2 +- zenoh/src/api/builders/sample.rs | 2 +- zenoh/src/api/query.rs | 3 ++- zenoh/src/api/queryable.rs | 4 ++-- zenoh/src/api/sample.rs | 8 +++----- zenoh/src/api/session.rs | 2 +- zenoh/src/lib.rs | 4 ++-- zenoh/tests/connection_retry.rs | 2 +- zenoh/tests/matching.rs | 1 + zenoh/tests/unicity.rs | 1 + zenohd/src/main.rs | 3 ++- 12 files changed, 18 insertions(+), 15 deletions(-) diff --git a/examples/examples/z_info.rs b/examples/examples/z_info.rs index 1d047f9454..6a919d8d38 100644 --- a/examples/examples/z_info.rs +++ b/examples/examples/z_info.rs @@ -15,6 +15,7 @@ use clap::Parser; use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; +use zenoh::config::ZenohId; #[tokio::main] async fn main() { diff --git a/zenoh/src/api/builders/publication.rs b/zenoh/src/api/builders/publication.rs index 20ceb086f9..eb60021dbd 100644 --- a/zenoh/src/api/builders/publication.rs +++ b/zenoh/src/api/builders/publication.rs @@ -13,10 +13,10 @@ use std::future::Ready; // Contributors: // ZettaScale Zenoh Team, // -use crate::api::publication::Priority; use crate::api::builders::sample::SampleBuilderTrait; use crate::api::builders::sample::{QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait}; use crate::api::key_expr::KeyExpr; +use crate::api::publication::Priority; #[cfg(feature = "unstable")] use crate::api::sample::Attachment; use crate::api::sample::Locality; diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs index 838d67010e..2af1a0a71c 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -17,6 +17,7 @@ use std::marker::PhantomData; use crate::api::encoding::Encoding; use crate::api::key_expr::KeyExpr; use crate::api::payload::Payload; +use crate::api::publication::Priority; #[cfg(feature = "unstable")] use crate::api::sample::Attachment; use crate::api::sample::QoS; @@ -26,7 +27,6 @@ use crate::api::sample::SampleKind; #[cfg(feature = "unstable")] use crate::api::sample::SourceInfo; use crate::api::value::Value; -use crate::api::publication::Priority; use uhlc::Timestamp; use zenoh_core::zresult; use zenoh_protocol::core::CongestionControl; diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 59fd9cc9e8..e9bedbe2e5 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -15,6 +15,7 @@ //! Query primitives. 
use crate::api::handlers::{locked, Callback, DefaultHandler}; use crate::api::key_expr::KeyExpr; +use crate::api::publication::Priority; #[zenoh_macros::unstable] use crate::api::sample::Attachment; use crate::api::sample::QoSBuilder; @@ -23,9 +24,9 @@ use crate::prelude::*; use std::collections::HashMap; use std::future::Ready; use std::time::Duration; -use crate::api::publication::Priority; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_keyexpr::OwnedKeyExpr; +use zenoh_protocol::core::ZenohId; use zenoh_result::ZResult; /// The [`Queryable`](crate::queryable::Queryable)s that should be target of a [`get`](Session::get). diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index e1391968f0..590b88bfa5 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -13,11 +13,11 @@ // //! Queryable primitives. - use super::key_expr::KeyExpr; use crate::api::builders::sample::SampleBuilder; use crate::api::encoding::Encoding; use crate::api::handlers::{locked, DefaultHandler}; +use crate::api::publication::Priority; use crate::api::sample::QoSBuilder; #[cfg(feature = "unstable")] use crate::api::sample::SourceInfo; @@ -33,8 +33,8 @@ use std::future::Ready; use std::ops::Deref; use std::sync::Arc; use uhlc::Timestamp; -use crate::api::publication::Priority; use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; +use zenoh_protocol::core::ZenohId; use zenoh_protocol::{ core::{EntityId, WireExpr}, network::{response, Mapping, RequestId, Response, ResponseFinal}, diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 6c0a49646e..359af2a436 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -16,6 +16,7 @@ use crate::api::builders::sample::{QoSBuilderTrait, ValueBuilderTrait}; use crate::api::encoding::Encoding; use crate::api::key_expr::KeyExpr; +use crate::api::publication::Priority; use crate::api::value::Value; use crate::payload::Payload; #[zenoh_macros::unstable] @@ -23,7 +24,6 @@ pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; #[zenoh_macros::unstable] use serde::Serialize; use std::{convert::TryFrom, fmt}; -use crate::api::publication::Priority; use zenoh_protocol::core::CongestionControl; use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::core::Timestamp; @@ -154,10 +154,8 @@ pub struct SourceInfo { #[test] #[cfg(feature = "unstable")] fn source_info_stack_size() { - use crate::{ - api::sample::{SourceInfo, SourceSn}, - ZenohId, - }; + use crate::api::sample::{SourceInfo, SourceSn}; + use zenoh_protocol::core::ZenohId; assert_eq!(std::mem::size_of::(), 16); assert_eq!(std::mem::size_of::>(), 17); diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 9221233262..ae0593790e 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -27,6 +27,7 @@ use crate::api::payload::Payload; use crate::api::publication::MatchingListenerState; #[zenoh_macros::unstable] use crate::api::publication::MatchingStatus; +use crate::api::publication::Priority; use crate::api::query::GetBuilder; use crate::api::query::QueryState; use crate::api::query::Reply; @@ -73,7 +74,6 @@ use zenoh_collections::SingleOrVec; use zenoh_config::unwrap_or_default; use zenoh_config::Config; use zenoh_config::Notifier; -use crate::api::publication::Priority; use zenoh_core::Resolvable; use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; use zenoh_protocol::core::Reliability; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 
c83c3bb598..0fd2c1b0f9 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -258,8 +258,8 @@ pub mod runtime { pub mod config { pub use zenoh_config::{ - client, default, peer, Config, ModeDependentValue, PermissionsConf, PluginLoad, - ValidatedMap, + client, default, peer, Config, EndPoint, Locator, ModeDependentValue, PermissionsConf, + PluginLoad, ValidatedMap, ZenohId, }; } diff --git a/zenoh/tests/connection_retry.rs index 0a3ed01ce7..a62becfaa9 100644 --- a/zenoh/tests/connection_retry.rs +++ b/zenoh/tests/connection_retry.rs @@ -1,4 +1,4 @@ -use zenoh_config::{ConnectionRetryConf, ValidatedMap}; +use zenoh_config::{ConnectionRetryConf, ValidatedMap, EndPoint}; use zenoh::prelude::sync::*; diff --git a/zenoh/tests/matching.rs index e56036f5de..6dd6835b7e 100644 --- a/zenoh/tests/matching.rs +++ b/zenoh/tests/matching.rs @@ -16,6 +16,7 @@ use std::time::Duration; use zenoh::prelude::r#async::*; use zenoh_core::ztimeout; use zenoh_result::ZResult as Result; +use zenoh::config::Locator; const TIMEOUT: Duration = Duration::from_secs(60); const RECV_TIMEOUT: Duration = Duration::from_secs(1); diff --git a/zenoh/tests/unicity.rs index 0033ed6468..374f773ff5 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -18,6 +18,7 @@ use tokio::runtime::Handle; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_core::ztimeout; +use zenoh::config::EndPoint; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff --git a/zenohd/src/main.rs index 4faa10534c..781fc308df 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -15,10 +15,11 @@ use clap::Parser; use futures::future; use git_version::git_version; use std::collections::HashSet; +use zenoh::config::EndPoint; use zenoh::config::{Config, ModeDependentValue, PermissionsConf, PluginLoad, ValidatedMap}; use zenoh::plugins::PluginsManager; -use zenoh::prelude::{EndPoint, WhatAmI}; use zenoh::runtime::{AdminSpace, Runtime}; +use zenoh::scouting::WhatAmI; use zenoh::Result; const GIT_VERSION: &str = git_version!(prefix = "v", cargo_prefix = "v"); From 314e708c7cd829e97ddaf07b14e85084f6016eed Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 11:33:06 +0200 Subject: [PATCH 141/357] prelude endpoint commented --- zenoh/src/lib.rs | 2 -- zenoh/src/prelude.rs | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/zenoh/src/lib.rs index 0fd2c1b0f9..c6b06259ec 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -83,8 +83,6 @@ mod api; mod net; use git_version::git_version; -#[cfg(feature = "unstable")] -use prelude::*; use zenoh_util::concat_enabled_features; /// A zenoh error.
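For orientation, the cumulative effect of these import fixes is that downstream code stops relying on the shrinking prelude and pulls each type from its concrete module (zenoh::config, zenoh::key_expr, zenoh::session, zenoh::publication). A minimal subscriber sketch under the reorganized paths, assuming the re-exports shown in the diffs above are stable at this point in the series (not part of the patches themselves):

    // Hypothetical downstream usage, not taken from this patch series; it assumes
    // zenoh::config::Config, zenoh::key_expr::KeyExpr and zenoh::session::SessionDeclarations
    // are the paths left in place by the refactor above.
    use zenoh::config::Config;
    use zenoh::key_expr::KeyExpr;
    use zenoh::prelude::r#async::*; // still provides AsyncResolve for .res()
    use zenoh::session::SessionDeclarations; // declare_subscriber() now needs this trait in scope

    #[tokio::main]
    async fn main() {
        let session = zenoh::open(Config::default()).res().await.unwrap();
        let key_expr = KeyExpr::try_from("demo/example/**").unwrap();
        let subscriber = session.declare_subscriber(&key_expr).res().await.unwrap();
        while let Ok(sample) = subscriber.recv_async().await {
            println!("received: {:?}", sample);
        }
    }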
diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index e8c7e8a3b4..7087270c42 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -32,7 +32,7 @@ pub(crate) mod common { // }; // pub use zenoh_core::Resolve; - pub use zenoh_protocol::core::{EndPoint, Locator, ZenohId}; + // pub use zenoh_protocol::core::{EndPoint, Locator, ZenohId}; #[zenoh_macros::unstable] pub use zenoh_protocol::core::{EntityGlobalId, EntityId}; From 0eafd2bc04027fd3d0fd8975e2bc694ad504c2c3 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 11:40:56 +0200 Subject: [PATCH 142/357] entityid commented in prelude --- examples/examples/z_get_liveliness.rs | 1 + examples/examples/z_liveliness.rs | 1 + examples/examples/z_pub_shm.rs | 1 + examples/examples/z_sub_liveliness.rs | 1 + zenoh/src/api/publication.rs | 4 ++++ zenoh/src/api/queryable.rs | 4 +++- zenoh/src/prelude.rs | 4 ++-- 7 files changed, 13 insertions(+), 3 deletions(-) diff --git a/examples/examples/z_get_liveliness.rs b/examples/examples/z_get_liveliness.rs index 487f3c25d6..5e6fd06c84 100644 --- a/examples/examples/z_get_liveliness.rs +++ b/examples/examples/z_get_liveliness.rs @@ -14,6 +14,7 @@ use clap::Parser; use std::time::Duration; use zenoh::config::Config; +use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_liveliness.rs b/examples/examples/z_liveliness.rs index 937868e091..2a93f50db8 100644 --- a/examples/examples/z_liveliness.rs +++ b/examples/examples/z_liveliness.rs @@ -13,6 +13,7 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index 2aadcf33de..bc239ebf41 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -14,6 +14,7 @@ use clap::Parser; use std::time::Duration; use zenoh::config::Config; +use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh::shm::SharedMemoryManager; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_sub_liveliness.rs b/examples/examples/z_sub_liveliness.rs index 50ba40c7ac..690299dbeb 100644 --- a/examples/examples/z_sub_liveliness.rs +++ b/examples/examples/z_sub_liveliness.rs @@ -13,6 +13,7 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 98064a1b99..08f4586a1b 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -38,6 +38,10 @@ use std::task::{Context, Poll}; use zenoh_core::{zread, AsyncResolve, Resolvable, Resolve, SyncResolve}; use zenoh_keyexpr::keyexpr; pub use zenoh_protocol::core::CongestionControl; +#[zenoh_macros::unstable] +use zenoh_protocol::core::EntityGlobalId; +#[zenoh_macros::unstable] +use zenoh_protocol::core::EntityId; use zenoh_protocol::network::push::ext; use zenoh_protocol::network::Push; use zenoh_protocol::zenoh::Del; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 590b88bfa5..38ee61d5ff 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -13,10 +13,10 @@ // //! Queryable primitives. 
-use super::key_expr::KeyExpr; use crate::api::builders::sample::SampleBuilder; use crate::api::encoding::Encoding; use crate::api::handlers::{locked, DefaultHandler}; +use crate::api::key_expr::KeyExpr; use crate::api::publication::Priority; use crate::api::sample::QoSBuilder; #[cfg(feature = "unstable")] @@ -34,6 +34,8 @@ use std::ops::Deref; use std::sync::Arc; use uhlc::Timestamp; use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; +#[zenoh_macros::unstable] +use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::core::ZenohId; use zenoh_protocol::{ core::{EntityId, WireExpr}, diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 7087270c42..112127271c 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -33,8 +33,8 @@ pub(crate) mod common { // pub use zenoh_core::Resolve; // pub use zenoh_protocol::core::{EndPoint, Locator, ZenohId}; - #[zenoh_macros::unstable] - pub use zenoh_protocol::core::{EntityGlobalId, EntityId}; + // #[zenoh_macros::unstable] + // pub use zenoh_protocol::core::{EntityGlobalId, EntityId}; pub use crate::config::{self, Config}; pub use crate::handlers::IntoHandler; From cfaef46a53cc721bebab2787346786ad453c58ad Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 11:52:58 +0200 Subject: [PATCH 143/357] config commented in prelude --- plugins/zenoh-plugin-rest/examples/z_serve_sse.rs | 1 + plugins/zenoh-plugin-storage-manager/tests/operations.rs | 4 ++-- plugins/zenoh-plugin-storage-manager/tests/wildcard.rs | 4 ++-- zenoh/src/api/publication.rs | 2 ++ zenoh/src/prelude.rs | 2 +- zenoh/tests/attachments.rs | 2 ++ zenoh/tests/connection_retry.rs | 2 +- zenoh/tests/events.rs | 3 ++- zenoh/tests/handler.rs | 2 ++ zenoh/tests/interceptors.rs | 2 +- zenoh/tests/liveliness.rs | 1 + zenoh/tests/matching.rs | 9 +++++---- zenoh/tests/session.rs | 1 + zenoh/tests/unicity.rs | 3 ++- 14 files changed, 25 insertions(+), 13 deletions(-) diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index 6a278c4784..366c6b7638 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -13,6 +13,7 @@ // use clap::{arg, Command}; use std::time::Duration; +use zenoh::config::Config; use zenoh::key_expr::keyexpr; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index d3c6207496..1d16ec23ea 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -20,12 +20,12 @@ use std::str::FromStr; use std::thread::sleep; use async_std::task; -use zenoh::config::ValidatedMap; +use zenoh::config::{Config, ValidatedMap}; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::query::Reply; use zenoh::session::Session; -use zenoh::{prelude::Config, time::Timestamp}; +use zenoh::time::Timestamp; use zenoh_core::zasync_executor_init; use zenoh_plugin_trait::Plugin; diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index 1b40a83cd5..4d8e72d55f 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -21,12 +21,12 @@ use std::thread::sleep; // use std::collections::HashMap; use async_std::task; -use zenoh::config::ValidatedMap; +use 
zenoh::config::{Config, ValidatedMap}; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::query::Reply; use zenoh::session::Session; -use zenoh::{prelude::Config, time::Timestamp}; +use zenoh::time::Timestamp; use zenoh_core::zasync_executor_init; use zenoh_plugin_trait::Plugin; diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 08f4586a1b..9a80366ce0 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -1090,6 +1090,8 @@ impl Drop for MatchingListenerInner<'_> { #[cfg(test)] mod tests { + use zenoh_config::Config; + #[test] fn priority_from() { use super::Priority as APrio; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 112127271c..fe4d02825a 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -36,7 +36,7 @@ pub(crate) mod common { // #[zenoh_macros::unstable] // pub use zenoh_protocol::core::{EntityGlobalId, EntityId}; - pub use crate::config::{self, Config}; + // pub use crate::config::{self, Config}; pub use crate::handlers::IntoHandler; pub use crate::selector::{Parameter, Parameters, Selector}; pub use crate::session::{Session, SessionDeclarations}; diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 7580984c8d..967397ea99 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -15,6 +15,7 @@ #[test] fn pubsub() { use zenoh::prelude::sync::*; + use zenoh_config::Config; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh @@ -62,6 +63,7 @@ fn pubsub() { #[test] fn queries() { use zenoh::{prelude::sync::*, sample::Attachment, sample::SampleBuilderTrait}; + use zenoh_config::Config; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh diff --git a/zenoh/tests/connection_retry.rs b/zenoh/tests/connection_retry.rs index a62becfaa9..675b4eb879 100644 --- a/zenoh/tests/connection_retry.rs +++ b/zenoh/tests/connection_retry.rs @@ -1,4 +1,4 @@ -use zenoh_config::{ConnectionRetryConf, ValidatedMap, EndPoint}; +use zenoh_config::{Config, ConnectionRetryConf, EndPoint, ValidatedMap}; use zenoh::prelude::sync::*; diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index 201f4941f9..380b3fcfbb 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -14,12 +14,13 @@ use std::time::Duration; use zenoh::prelude::r#async::*; use zenoh::query::Reply; +use zenoh_config::peer; use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(10); async fn open_session(listen: &[&str], connect: &[&str]) -> Session { - let mut config = config::peer(); + let mut config = peer(); config.listen.endpoints = listen .iter() .map(|e| e.parse().unwrap()) diff --git a/zenoh/tests/handler.rs b/zenoh/tests/handler.rs index ceed15e2c3..8330c454ad 100644 --- a/zenoh/tests/handler.rs +++ b/zenoh/tests/handler.rs @@ -1,3 +1,5 @@ +use zenoh_config::Config; + // // Copyright (c) 2024 ZettaScale Technology // diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index a6eff19ec9..ecb6724e22 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use std::sync::{Arc, Mutex}; -use zenoh_config::ValidatedMap; +use zenoh_config::{Config, ValidatedMap}; use zenoh_core::zlock; struct IntervalCounter { diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index 0e2870d808..4762c5cf91 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -13,6 +13,7 @@ // use std::time::Duration; use 
zenoh::prelude::r#async::*; +use zenoh_config as config; use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/zenoh/tests/matching.rs b/zenoh/tests/matching.rs index 6dd6835b7e..c678c423d0 100644 --- a/zenoh/tests/matching.rs +++ b/zenoh/tests/matching.rs @@ -13,10 +13,12 @@ // use std::str::FromStr; use std::time::Duration; +use zenoh::config::Locator; use zenoh::prelude::r#async::*; +use zenoh_config as config; +use zenoh_config::peer; use zenoh_core::ztimeout; use zenoh_result::ZResult as Result; -use zenoh::config::Locator; const TIMEOUT: Duration = Duration::from_secs(60); const RECV_TIMEOUT: Duration = Duration::from_secs(1); @@ -104,10 +106,9 @@ async fn zenoh_matching_status_any() -> Result<()> { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_matching_status_remote() -> Result<()> { use flume::RecvTimeoutError; + let session1 = ztimeout!(zenoh::open(peer()).res_async()).unwrap(); - let session1 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); - - let session2 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); + let session2 = ztimeout!(zenoh::open(peer()).res_async()).unwrap(); let publisher1 = ztimeout!(session1 .declare_publisher("zenoh_matching_status_remote_test") diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 35f3dac2c6..bae00d37f2 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -16,6 +16,7 @@ use std::sync::Arc; use std::time::Duration; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; +use zenoh_config as config; use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index 374f773ff5..6ba59ef242 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -15,10 +15,11 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; use tokio::runtime::Handle; +use zenoh::config::EndPoint; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; +use zenoh_config as config; use zenoh_core::ztimeout; -use zenoh::config::EndPoint; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); From af656d50810aec1ebfe24f2be186ef25c0abd457 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 11:59:55 +0200 Subject: [PATCH 144/357] into handler removed --- zenoh-ext/src/querying_subscriber.rs | 6 +++--- zenoh/src/api/liveliness.rs | 2 ++ zenoh/src/api/publication.rs | 2 +- zenoh/src/api/query.rs | 2 +- zenoh/src/api/queryable.rs | 4 ++-- zenoh/src/api/scouting.rs | 10 +++++----- zenoh/src/api/subscriber.rs | 2 +- zenoh/src/prelude.rs | 2 +- 8 files changed, 16 insertions(+), 14 deletions(-) diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index bdcab8f220..90491e58b9 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -17,7 +17,7 @@ use std::future::Ready; use std::mem::swap; use std::sync::{Arc, Mutex}; use std::time::Duration; -use zenoh::handlers::{locked, DefaultHandler}; +use zenoh::handlers::{locked, DefaultHandler, IntoHandler}; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; @@ -107,7 +107,7 @@ impl<'a, 'b, KeySpace> QueryingSubscriberBuilder<'a, 'b, KeySpace, DefaultHandle handler: Handler, ) -> QueryingSubscriberBuilder<'a, 'b, KeySpace, Handler> where - Handler: 
zenoh::prelude::IntoHandler<'static, Sample>, + Handler: IntoHandler<'static, Sample>, { let QueryingSubscriberBuilder { session, @@ -464,7 +464,7 @@ where handler: Handler, ) -> FetchingSubscriberBuilder<'a, 'b, KeySpace, Handler, Fetch, TryIntoSample> where - Handler: zenoh::prelude::IntoHandler<'static, Sample>, + Handler: IntoHandler<'static, Sample>, { let FetchingSubscriberBuilder { session, diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 1f7d03ddca..2177a6ce5b 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -15,7 +15,9 @@ //! Liveliness primitives. //! //! see [`Liveliness`] +use crate::api::handlers::IntoHandler; use zenoh_core::Resolve; + #[zenoh_macros::unstable] use { crate::{ diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 9a80366ce0..3f3fedc3c4 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -875,7 +875,7 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { #[zenoh_macros::unstable] pub fn with(self, handler: Handler) -> MatchingListenerBuilder<'a, Handler> where - Handler: crate::prelude::IntoHandler<'static, MatchingStatus>, + Handler: IntoHandler<'static, MatchingStatus>, { let MatchingListenerBuilder { publisher, diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index e9bedbe2e5..e8729e3803 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -13,7 +13,7 @@ // //! Query primitives. -use crate::api::handlers::{locked, Callback, DefaultHandler}; +use crate::api::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::api::key_expr::KeyExpr; use crate::api::publication::Priority; #[zenoh_macros::unstable] diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 38ee61d5ff..f1ac55413d 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -15,7 +15,7 @@ //! Queryable primitives. 
use crate::api::builders::sample::SampleBuilder; use crate::api::encoding::Encoding; -use crate::api::handlers::{locked, DefaultHandler}; +use crate::api::handlers::{locked, DefaultHandler, IntoHandler}; use crate::api::key_expr::KeyExpr; use crate::api::publication::Priority; use crate::api::sample::QoSBuilder; @@ -755,7 +755,7 @@ impl<'a, 'b> QueryableBuilder<'a, 'b, DefaultHandler> { #[inline] pub fn with(self, handler: Handler) -> QueryableBuilder<'a, 'b, Handler> where - Handler: crate::prelude::IntoHandler<'static, Query>, + Handler: IntoHandler<'static, Query>, { let QueryableBuilder { session, diff --git a/zenoh/src/api/scouting.rs b/zenoh/src/api/scouting.rs index 56f8d4c1a4..bcc1482f1b 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::api::handlers::{locked, Callback, DefaultHandler}; +use crate::api::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::net::runtime::{orchestrator::Loop, Runtime}; use futures::StreamExt; use std::{fmt, future::Ready, net::SocketAddr, ops::Deref}; @@ -139,7 +139,7 @@ impl ScoutBuilder { #[inline] pub fn with(self, handler: Handler) -> ScoutBuilder where - Handler: crate::prelude::IntoHandler<'static, Hello>, + Handler: IntoHandler<'static, Hello>, { let ScoutBuilder { what, @@ -156,7 +156,7 @@ impl ScoutBuilder { impl Resolvable for ScoutBuilder where - Handler: crate::prelude::IntoHandler<'static, Hello> + Send, + Handler: IntoHandler<'static, Hello> + Send, Handler::Handler: Send, { type To = ZResult>; @@ -164,7 +164,7 @@ where impl SyncResolve for ScoutBuilder where - Handler: crate::prelude::IntoHandler<'static, Hello> + Send, + Handler: IntoHandler<'static, Hello> + Send, Handler::Handler: Send, { fn res_sync(self) -> ::To { @@ -175,7 +175,7 @@ where impl AsyncResolve for ScoutBuilder where - Handler: crate::prelude::IntoHandler<'static, Hello> + Send, + Handler: IntoHandler<'static, Hello> + Send, Handler::Handler: Send, { type Future = Ready; diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index 39e863b1fa..34df5569f4 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -309,7 +309,7 @@ impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> { #[inline] pub fn with(self, handler: Handler) -> SubscriberBuilder<'a, 'b, Handler> where - Handler: crate::prelude::IntoHandler<'static, Sample>, + Handler: IntoHandler<'static, Sample>, { let SubscriberBuilder { session, diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index fe4d02825a..2ba784d06f 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -37,7 +37,7 @@ pub(crate) mod common { // pub use zenoh_protocol::core::{EntityGlobalId, EntityId}; // pub use crate::config::{self, Config}; - pub use crate::handlers::IntoHandler; + // pub use crate::handlers::IntoHandler; pub use crate::selector::{Parameter, Parameters, Selector}; pub use crate::session::{Session, SessionDeclarations}; From 045823959b7e1e90337a23bcb608e17518d7a18d Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 12:18:29 +0200 Subject: [PATCH 145/357] api.rs to mod.rs --- zenoh/src/{api.rs => api/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename zenoh/src/{api.rs => api/mod.rs} (100%) diff --git a/zenoh/src/api.rs b/zenoh/src/api/mod.rs similarity index 100% rename from zenoh/src/api.rs rename to zenoh/src/api/mod.rs From 4dfb118a09dc987747053890d72bc5edaedfd1f4 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 
2024 12:39:45 +0200 Subject: [PATCH 146/357] parameters commented in prelude --- examples/examples/z_get.rs | 1 + plugins/zenoh-plugin-rest/src/lib.rs | 2 +- plugins/zenoh-plugin-storage-manager/src/lib.rs | 1 + .../src/replica/align_queryable.rs | 1 + zenoh-ext/src/publication_cache.rs | 1 + zenoh-ext/src/querying_subscriber.rs | 1 + zenoh/src/api/key_expr.rs | 4 ++-- zenoh/src/api/plugins.rs | 2 +- zenoh/src/api/query.rs | 1 + zenoh/src/api/queryable.rs | 2 ++ zenoh/src/net/runtime/adminspace.rs | 4 ++-- zenoh/src/prelude.rs | 2 +- 12 files changed, 15 insertions(+), 7 deletions(-) diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 8735ae8daa..4ac31df3e4 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -15,6 +15,7 @@ use clap::Parser; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; +use zenoh::selector::Selector; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index e2434c644c..a9dbdce912 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -35,7 +35,7 @@ use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; -use zenoh::selector::TIME_RANGE_KEY; +use zenoh::selector::{Parameters, Selector, TIME_RANGE_KEY}; use zenoh::session::Session; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; use zenoh_result::{bail, zerror, ZResult}; diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 7d679ef37d..12fbede21d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -31,6 +31,7 @@ use zenoh::key_expr::keyexpr; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::sync::*; use zenoh::runtime::Runtime; +use zenoh::selector::Selector; use zenoh::session::Session; use zenoh_backend_traits::config::ConfigDiff; use zenoh_backend_traits::config::PluginConfig; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index d73b9b2b6d..89769bea1c 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -21,6 +21,7 @@ use std::str::FromStr; use zenoh::key_expr::OwnedKeyExpr; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; +use zenoh::selector::Selector; use zenoh::session::Session; use zenoh::time::Timestamp; diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index e1a974b6ff..ba1491aa6f 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -18,6 +18,7 @@ use std::future::Ready; use zenoh::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; use zenoh::prelude::r#async::*; use zenoh::queryable::{Query, Queryable}; +use zenoh::selector::Parameters; use zenoh::session::SessionRef; use zenoh::subscriber::FlumeSubscriber; use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 90491e58b9..8b98483de3 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -22,6 +22,7 @@ use zenoh::key_expr::KeyExpr; 
use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; use zenoh::sample::SampleBuilder; +use zenoh::selector::Selector; use zenoh::session::SessionRef; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::{new_reception_timestamp, Timestamp}; diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index bed9fd5b95..1f381486a0 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -57,8 +57,8 @@ use zenoh_protocol::{ use zenoh_result::ZResult; use crate::{ - api::session::Session, api::session::Undeclarable, net::primitives::Primitives, - prelude::Selector, + api::selector::Selector, api::session::Session, api::session::Undeclarable, + net::primitives::Primitives, }; #[derive(Clone, Debug)] diff --git a/zenoh/src/api/plugins.rs b/zenoh/src/api/plugins.rs index 3684324cf2..36cde5ba34 100644 --- a/zenoh/src/api/plugins.rs +++ b/zenoh/src/api/plugins.rs @@ -14,7 +14,7 @@ //! `zenohd`'s plugin system. For more details, consult the [detailed documentation](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Plugins/Zenoh%20Plugins.md). -use crate::{net::runtime::Runtime, prelude::Selector}; +use crate::{api::selector::Selector, net::runtime::Runtime}; use zenoh_core::zconfigurable; use zenoh_plugin_trait::{ diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index e8729e3803..76ee714828 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -19,6 +19,7 @@ use crate::api::publication::Priority; #[zenoh_macros::unstable] use crate::api::sample::Attachment; use crate::api::sample::QoSBuilder; +use crate::api::selector::Selector; use crate::api::session::Session; use crate::prelude::*; use std::collections::HashMap; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index f1ac55413d..7319575d89 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -21,6 +21,8 @@ use crate::api::publication::Priority; use crate::api::sample::QoSBuilder; #[cfg(feature = "unstable")] use crate::api::sample::SourceInfo; +use crate::api::selector::Parameters; +use crate::api::selector::Selector; use crate::api::session::SessionRef; use crate::api::session::Undeclarable; use crate::api::Id; diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index a87fea5f08..be992621e6 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -530,7 +530,7 @@ fn router_data(context: &AdminContext, query: Query) { }); #[cfg(feature = "stats")] { - let stats = crate::prelude::Parameters::decode(&query.selector()) + let stats = crate::api::selector::Parameters::decode(&query.selector()) .any(|(k, v)| k.as_ref() == "_stats" && v != "false"); if stats { json.as_object_mut().unwrap().insert( @@ -561,7 +561,7 @@ fn router_data(context: &AdminContext, query: Query) { #[cfg(feature = "stats")] { - let stats = crate::prelude::Parameters::decode(&query.selector()) + let stats = crate::api::selector::Parameters::decode(&query.selector()) .any(|(k, v)| k.as_ref() == "_stats" && v != "false"); if stats { json.as_object_mut().unwrap().insert( diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 2ba784d06f..265f4f2c48 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -38,7 +38,7 @@ pub(crate) mod common { // pub use crate::config::{self, Config}; // pub use crate::handlers::IntoHandler; - pub use crate::selector::{Parameter, Parameters, Selector}; + // pub use crate::selector::{Parameter, Parameters, 
Selector}; pub use crate::session::{Session, SessionDeclarations}; pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; From 2f0c7b405f883cf926c969a5a56cc6d9b673737b Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 13:02:25 +0200 Subject: [PATCH 147/357] session commented in api --- examples/examples/z_forward.rs | 1 + examples/examples/z_info.rs | 3 ++- examples/examples/z_ping.rs | 1 + examples/examples/z_pong.rs | 1 + examples/examples/z_pub.rs | 1 + examples/examples/z_pub_thr.rs | 1 + examples/examples/z_pull.rs | 5 ++++- examples/examples/z_queryable.rs | 1 + examples/examples/z_storage.rs | 1 + examples/examples/z_sub.rs | 1 + examples/examples/z_sub_thr.rs | 1 + plugins/zenoh-plugin-example/src/lib.rs | 1 + plugins/zenoh-plugin-rest/examples/z_serve_sse.rs | 1 + plugins/zenoh-plugin-rest/src/lib.rs | 2 +- .../src/replica/align_queryable.rs | 1 + plugins/zenoh-plugin-storage-manager/src/replica/mod.rs | 1 + .../zenoh-plugin-storage-manager/src/replica/storage.rs | 1 + zenoh-ext/examples/z_query_sub.rs | 1 + zenoh-ext/src/group.rs | 1 + zenoh-ext/src/publication_cache.rs | 2 +- zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh/src/api/liveliness.rs | 2 +- zenoh/src/api/publication.rs | 2 ++ zenoh/src/prelude.rs | 2 +- zenoh/tests/attachments.rs | 8 ++++++-- zenoh/tests/events.rs | 1 + zenoh/tests/handler.rs | 1 + zenoh/tests/interceptors.rs | 1 + zenoh/tests/liveliness.rs | 2 ++ zenoh/tests/matching.rs | 4 ++++ zenoh/tests/routing.rs | 1 + zenoh/tests/session.rs | 1 + zenoh/tests/unicity.rs | 1 + 33 files changed, 47 insertions(+), 9 deletions(-) diff --git a/examples/examples/z_forward.rs b/examples/examples/z_forward.rs index 349690c8a8..a4c3cb4ced 100644 --- a/examples/examples/z_forward.rs +++ b/examples/examples/z_forward.rs @@ -15,6 +15,7 @@ use clap::Parser; use zenoh::config::Config; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; +use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; use zenoh_ext::SubscriberForward; diff --git a/examples/examples/z_info.rs b/examples/examples/z_info.rs index 6a919d8d38..c63e5974e9 100644 --- a/examples/examples/z_info.rs +++ b/examples/examples/z_info.rs @@ -13,9 +13,10 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::config::ZenohId; use zenoh::prelude::r#async::*; +use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; -use zenoh::config::ZenohId; #[tokio::main] async fn main() { diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index a989b34482..6070fb1e94 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -17,6 +17,7 @@ use zenoh::config::Config; use zenoh::key_expr::keyexpr; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; +use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index 60f6db0b68..7446456938 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -16,6 +16,7 @@ use zenoh::config::Config; use zenoh::key_expr::keyexpr; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; +use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 176e991fff..10209b04e6 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -16,6 +16,7 @@ use std::time::Duration; use zenoh::config::Config; use 
zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; +use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 4354ad2e68..b23e3ce1bd 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -16,6 +16,7 @@ use clap::Parser; use std::convert::TryInto; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; +use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index 3d4ff30e2b..6a07de8358 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -13,7 +13,10 @@ // use clap::Parser; use std::time::Duration; -use zenoh::{config::Config, handlers::RingBuffer, key_expr::KeyExpr, prelude::r#async::*}; +use zenoh::{ + config::Config, handlers::RingBuffer, key_expr::KeyExpr, prelude::r#async::*, + session::SessionDeclarations, +}; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index 025f3cc1cc..ac58d9f094 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -15,6 +15,7 @@ use clap::Parser; use zenoh::config::Config; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; +use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index 50d84001a8..4c6ed0ede5 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -19,6 +19,7 @@ use std::collections::HashMap; use zenoh::config::Config; use zenoh::key_expr::{keyexpr, KeyExpr}; use zenoh::prelude::r#async::*; +use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index f7e232f240..df77429356 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -15,6 +15,7 @@ use clap::Parser; use zenoh::config::Config; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; +use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_sub_thr.rs b/examples/examples/z_sub_thr.rs index 0a8426edf0..b4b6ecd0e5 100644 --- a/examples/examples/z_sub_thr.rs +++ b/examples/examples/z_sub_thr.rs @@ -15,6 +15,7 @@ use clap::Parser; use std::time::Instant; use zenoh::config::Config; use zenoh::prelude::sync::*; +use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; struct Stats { diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 9d25f582fb..a1e2af6574 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -26,6 +26,7 @@ use zenoh::key_expr::{keyexpr, KeyExpr}; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::runtime::Runtime; +use zenoh::session::SessionDeclarations; use zenoh_core::zlock; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; use zenoh_result::{bail, ZResult}; diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index 366c6b7638..28627999f3 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -17,6 +17,7 @@ use 
zenoh::config::Config; use zenoh::key_expr::keyexpr; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; +use zenoh::session::SessionDeclarations; const HTML: &str = r#"

diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index a9dbdce912..c69f83794b 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -36,7 +36,7 @@ use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; use zenoh::selector::{Parameters, Selector, TIME_RANGE_KEY}; -use zenoh::session::Session; +use zenoh::session::{Session, SessionDeclarations}; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; use zenoh_result::{bail, zerror, ZResult}; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 89769bea1c..792b0f351f 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -23,6 +23,7 @@ use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::selector::Selector; use zenoh::session::Session; +use zenoh::session::SessionDeclarations; use zenoh::time::Timestamp; pub struct AlignQueryable { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index 5289fc47af..c24d9b1fef 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -30,6 +30,7 @@ use zenoh::key_expr::OwnedKeyExpr; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::session::Session; +use zenoh::session::SessionDeclarations; use zenoh::time::Timestamp; use zenoh_backend_traits::config::{ReplicaConfig, StorageConfig}; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 007b21083b..88b21f0a0c 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -32,6 +32,7 @@ use zenoh::prelude::r#async::*; use zenoh::query::{ConsolidationMode, QueryTarget}; use zenoh::sample::SampleBuilder; use zenoh::sample::{Sample, SampleKind}; +use zenoh::session::SessionDeclarations; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::value::Value; use zenoh::{session::Session, Result as ZResult}; diff --git a/zenoh-ext/examples/z_query_sub.rs b/zenoh-ext/examples/z_query_sub.rs index d88519789b..61ea0eac92 100644 --- a/zenoh-ext/examples/z_query_sub.rs +++ b/zenoh-ext/examples/z_query_sub.rs @@ -16,6 +16,7 @@ use clap::Command; use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh::query::ReplyKeyExpr; +use zenoh::session::SessionDeclarations; use zenoh_ext::*; #[tokio::main] diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 7ede485784..5e9b9e66f3 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -33,6 +33,7 @@ use zenoh::prelude::r#async::*; use zenoh::publication::Publisher; use zenoh::query::ConsolidationMode; use zenoh::session::Session; +use zenoh::session::SessionDeclarations; use zenoh::Error as ZError; use zenoh::Result as ZResult; use zenoh_result::bail; diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index ba1491aa6f..85d3157d3c 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -19,7 +19,7 @@ use zenoh::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; use 
zenoh::prelude::r#async::*; use zenoh::queryable::{Query, Queryable}; use zenoh::selector::Parameters; -use zenoh::session::SessionRef; +use zenoh::session::{SessionDeclarations, SessionRef}; use zenoh::subscriber::FlumeSubscriber; use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; use zenoh_result::{bail, ZResult}; diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 8b98483de3..e58786628f 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -23,7 +23,7 @@ use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; use zenoh::sample::SampleBuilder; use zenoh::selector::Selector; -use zenoh::session::SessionRef; +use zenoh::session::{SessionDeclarations, SessionRef}; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::{new_reception_timestamp, Timestamp}; use zenoh::Result as ZResult; diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 2177a6ce5b..634aedd1fb 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -16,8 +16,8 @@ //! //! see [`Liveliness`] use crate::api::handlers::IntoHandler; +use crate::api::session::Session; use zenoh_core::Resolve; - #[zenoh_macros::unstable] use { crate::{ diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 3f3fedc3c4..db90c1dc22 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -1092,6 +1092,8 @@ impl Drop for MatchingListenerInner<'_> { mod tests { use zenoh_config::Config; + use crate::api::session::SessionDeclarations; + #[test] fn priority_from() { use super::Priority as APrio; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 265f4f2c48..9863f42a5c 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -39,7 +39,7 @@ pub(crate) mod common { // pub use crate::config::{self, Config}; // pub use crate::handlers::IntoHandler; // pub use crate::selector::{Parameter, Parameters, Selector}; - pub use crate::session::{Session, SessionDeclarations}; + // pub use crate::session::{Session, SessionDeclarations}; pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 967397ea99..073d5537bd 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -14,7 +14,7 @@ #[cfg(feature = "unstable")] #[test] fn pubsub() { - use zenoh::prelude::sync::*; + use zenoh::{prelude::sync::*, session::SessionDeclarations}; use zenoh_config::Config; let zenoh = zenoh::open(Config::default()).res().unwrap(); @@ -62,7 +62,11 @@ fn pubsub() { #[cfg(feature = "unstable")] #[test] fn queries() { - use zenoh::{prelude::sync::*, sample::Attachment, sample::SampleBuilderTrait}; + use zenoh::{ + prelude::sync::*, + sample::{Attachment, SampleBuilderTrait}, + session::SessionDeclarations, + }; use zenoh_config::Config; let zenoh = zenoh::open(Config::default()).res().unwrap(); diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index 380b3fcfbb..aafdfdf7d5 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -14,6 +14,7 @@ use std::time::Duration; use zenoh::prelude::r#async::*; use zenoh::query::Reply; +use zenoh::session::{Session, SessionDeclarations}; use zenoh_config::peer; use zenoh_core::ztimeout; diff --git a/zenoh/tests/handler.rs b/zenoh/tests/handler.rs index 8330c454ad..fdb8e225fa 100644 --- a/zenoh/tests/handler.rs +++ b/zenoh/tests/handler.rs @@ -1,3 +1,4 @@ +use 
zenoh::session::SessionDeclarations; use zenoh_config::Config; // diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index ecb6724e22..1ff1f49651 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::sync::{Arc, Mutex}; +use zenoh::session::SessionDeclarations; use zenoh_config::{Config, ValidatedMap}; use zenoh_core::zlock; diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index 4762c5cf91..0af0b64164 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -22,6 +22,8 @@ const SLEEP: Duration = Duration::from_secs(1); #[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_liveliness() { + use zenoh::session::SessionDeclarations; + let mut c1 = config::peer(); c1.listen .set_endpoints(vec!["tcp/localhost:47447".parse().unwrap()]) diff --git a/zenoh/tests/matching.rs b/zenoh/tests/matching.rs index c678c423d0..304af977d2 100644 --- a/zenoh/tests/matching.rs +++ b/zenoh/tests/matching.rs @@ -15,6 +15,7 @@ use std::str::FromStr; use std::time::Duration; use zenoh::config::Locator; use zenoh::prelude::r#async::*; +use zenoh::session::Session; use zenoh_config as config; use zenoh_config::peer; use zenoh_core::ztimeout; @@ -45,6 +46,7 @@ async fn create_session_pair(locator: &str) -> (Session, Session) { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_matching_status_any() -> Result<()> { use flume::RecvTimeoutError; + use zenoh::session::SessionDeclarations; let (session1, session2) = create_session_pair("tcp/127.0.0.1:18001").await; @@ -106,6 +108,7 @@ async fn zenoh_matching_status_any() -> Result<()> { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_matching_status_remote() -> Result<()> { use flume::RecvTimeoutError; + use zenoh::session::SessionDeclarations; let session1 = ztimeout!(zenoh::open(peer()).res_async()).unwrap(); let session2 = ztimeout!(zenoh::open(peer()).res_async()).unwrap(); @@ -169,6 +172,7 @@ async fn zenoh_matching_status_remote() -> Result<()> { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_matching_status_local() -> Result<()> { use flume::RecvTimeoutError; + use zenoh::session::SessionDeclarations; let session1 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 9803d62c4e..b0d789312a 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -19,6 +19,7 @@ use tokio_util::{sync::CancellationToken, task::TaskTracker}; use zenoh::config::{Config, ModeDependentValue}; use zenoh::prelude::r#async::*; use zenoh::sample::QoSBuilderTrait; +use zenoh::session::{Session, SessionDeclarations}; use zenoh::Result; use zenoh_core::ztimeout; use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher}; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index bae00d37f2..54337729ae 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -16,6 +16,7 @@ use std::sync::Arc; use std::time::Duration; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; +use zenoh::session::{Session, SessionDeclarations}; use zenoh_config as config; use zenoh_core::ztimeout; diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index 6ba59ef242..f37f6cb852 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -18,6 +18,7 @@ use tokio::runtime::Handle; use zenoh::config::EndPoint; use zenoh::key_expr::KeyExpr; use 
zenoh::prelude::r#async::*; +use zenoh::session::{Session, SessionDeclarations}; use zenoh_config as config; use zenoh_core::ztimeout; From bd717db83cc2f9d1cb051090db8de904348bf0c9 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 13:05:57 +0200 Subject: [PATCH 148/357] query commented in prelude --- examples/examples/z_get.rs | 1 + zenoh/src/api/liveliness.rs | 2 ++ zenoh/src/prelude.rs | 2 +- 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 4ac31df3e4..6326ddf6c6 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -15,6 +15,7 @@ use clap::Parser; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; +use zenoh::query::QueryTarget; use zenoh::selector::Selector; use zenoh_examples::CommonArgs; diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 634aedd1fb..17cb2246b0 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -16,8 +16,10 @@ //! //! see [`Liveliness`] use crate::api::handlers::IntoHandler; +use crate::api::query::{QueryConsolidation, QueryTarget}; use crate::api::session::Session; use zenoh_core::Resolve; + #[zenoh_macros::unstable] use { crate::{ diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 9863f42a5c..14ccee0252 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -41,7 +41,7 @@ pub(crate) mod common { // pub use crate::selector::{Parameter, Parameters, Selector}; // pub use crate::session::{Session, SessionDeclarations}; - pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; + // pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; pub use crate::api::encoding::Encoding; pub use crate::api::value::Value; From 42fd23fb2ca416eef028aee0db8f039d87e3df90 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 13:08:59 +0200 Subject: [PATCH 149/357] encoding commented in prelude --- plugins/zenoh-plugin-rest/src/lib.rs | 1 + zenoh/src/api/publication.rs | 1 + zenoh/src/api/query.rs | 1 + zenoh/src/prelude.rs | 2 +- 4 files changed, 4 insertions(+), 1 deletion(-) diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index c69f83794b..5ca7169dfd 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -29,6 +29,7 @@ use std::sync::Arc; use tide::http::Mime; use tide::sse::Sender; use tide::{Request, Response, Server, StatusCode}; +use zenoh::encoding::Encoding; use zenoh::key_expr::{keyexpr, KeyExpr}; use zenoh::payload::StringOrBase64; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index db90c1dc22..6a52e3bde2 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -17,6 +17,7 @@ use crate::api::builders::publication::{ PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, PublisherDeleteBuilder, PublisherPutBuilder, }; +use crate::api::encoding::Encoding; use crate::api::key_expr::KeyExpr; #[zenoh_macros::unstable] use crate::api::sample::Attachment; diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 76ee714828..20ebfd2f70 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -13,6 +13,7 @@ // //! Query primitives. 
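// Editor's illustration (not part of the patch series): with the query types no
// longer re-exported from the prelude, call sites import them explicitly, as the
// z_get example above now does. A minimal sketch, assuming the resolve API of
// this era (`.res()` with `AsyncResolve` from the async prelude) and the
// `GetBuilder::target()` method; reply handling is elided because the Reply
// shape is still evolving in this series.

use zenoh::config::Config;
use zenoh::prelude::r#async::*;
use zenoh::query::QueryTarget;

#[tokio::main]
async fn main() {
    let session = zenoh::open(Config::default()).res().await.unwrap();
    let replies = session
        .get("demo/example/**")
        .target(QueryTarget::All) // explicit import instead of the old prelude re-export
        .res()
        .await
        .unwrap();
    while let Ok(_reply) = replies.recv_async().await {
        // inspect the reply here (sample/error accessors elided)
    }
}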
+use crate::api::encoding::Encoding; use crate::api::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::api::key_expr::KeyExpr; use crate::api::publication::Priority; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 14ccee0252..53eed06f1f 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -43,7 +43,7 @@ pub(crate) mod common { // pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; - pub use crate::api::encoding::Encoding; + // pub use crate::api::encoding::Encoding; pub use crate::api::value::Value; /// The encoding of a zenoh `Value`. pub use crate::payload::{Deserialize, Payload, Serialize}; From 0faa5c21c3098f456cbf395a6ce073ceaffd58ac Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 13:15:08 +0200 Subject: [PATCH 150/357] value commented in prelude --- plugins/zenoh-plugin-rest/src/lib.rs | 1 + plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs | 2 +- .../zenoh-plugin-storage-manager/src/replica/align_queryable.rs | 1 + plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs | 1 + zenoh/src/api/query.rs | 1 + zenoh/src/api/queryable.rs | 1 + zenoh/src/prelude.rs | 2 +- zenoh/tests/session.rs | 1 + 8 files changed, 8 insertions(+), 2 deletions(-) diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 5ca7169dfd..0b855a54fc 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -38,6 +38,7 @@ use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; use zenoh::selector::{Parameters, Selector, TIME_RANGE_KEY}; use zenoh::session::{Session, SessionDeclarations}; +use zenoh::value::Value; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; use zenoh_result::{bail, zerror, ZResult}; diff --git a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs index d9f330ea8c..0924279cb2 100644 --- a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs @@ -16,8 +16,8 @@ use async_trait::async_trait; use std::collections::HashMap; use std::sync::Arc; use zenoh::key_expr::OwnedKeyExpr; -use zenoh::prelude::r#async::*; use zenoh::time::Timestamp; +use zenoh::value::Value; use zenoh_backend_traits::config::{StorageConfig, VolumeConfig}; use zenoh_backend_traits::*; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin}; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 792b0f351f..00d198bef6 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -25,6 +25,7 @@ use zenoh::selector::Selector; use zenoh::session::Session; use zenoh::session::SessionDeclarations; use zenoh::time::Timestamp; +use zenoh::value::Value; pub struct AlignQueryable { session: Arc, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 0553710851..0d750c8810 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -24,6 +24,7 @@ use zenoh::prelude::r#async::*; use zenoh::sample::SampleBuilder; use zenoh::session::Session; use zenoh::time::Timestamp; +use 
zenoh::value::Value; pub struct Aligner { session: Arc, diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 20ebfd2f70..5f881053a2 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -22,6 +22,7 @@ use crate::api::sample::Attachment; use crate::api::sample::QoSBuilder; use crate::api::selector::Selector; use crate::api::session::Session; +use crate::api::value::Value; use crate::prelude::*; use std::collections::HashMap; use std::future::Ready; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 7319575d89..db41c6a815 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -25,6 +25,7 @@ use crate::api::selector::Parameters; use crate::api::selector::Selector; use crate::api::session::SessionRef; use crate::api::session::Undeclarable; +use crate::api::value::Value; use crate::api::Id; use crate::net::primitives::Primitives; use crate::prelude::*; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 53eed06f1f..75f96ea1ed 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -44,7 +44,7 @@ pub(crate) mod common { // pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; // pub use crate::api::encoding::Encoding; - pub use crate::api::value::Value; + // pub use crate::api::value::Value; /// The encoding of a zenoh `Value`. pub use crate::payload::{Deserialize, Payload, Serialize}; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 54337729ae..f36775bca2 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -17,6 +17,7 @@ use std::time::Duration; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh::session::{Session, SessionDeclarations}; +use zenoh::value::Value; use zenoh_config as config; use zenoh_core::ztimeout; From 33e3d7eaecac8531d1f651e5f67f9cd6a0ceb8b4 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 13:17:40 +0200 Subject: [PATCH 151/357] payload commented in prelude --- examples/examples/z_ping.rs | 1 + examples/examples/z_pub_thr.rs | 1 + plugins/zenoh-plugin-rest/src/lib.rs | 2 +- zenoh/src/api/publication.rs | 1 + zenoh/src/api/query.rs | 1 + zenoh/src/api/queryable.rs | 1 + zenoh/src/prelude.rs | 2 +- 7 files changed, 7 insertions(+), 2 deletions(-) diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index 6070fb1e94..ef9bc08617 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -15,6 +15,7 @@ use clap::Parser; use std::time::{Duration, Instant}; use zenoh::config::Config; use zenoh::key_expr::keyexpr; +use zenoh::payload::Payload; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; use zenoh::session::SessionDeclarations; diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index b23e3ce1bd..75e2d72fbd 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -14,6 +14,7 @@ use clap::Parser; use std::convert::TryInto; +use zenoh::payload::Payload; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; use zenoh::session::SessionDeclarations; diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 0b855a54fc..83085fd449 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -31,7 +31,7 @@ use tide::sse::Sender; use tide::{Request, Response, Server, StatusCode}; use zenoh::encoding::Encoding; use zenoh::key_expr::{keyexpr, KeyExpr}; -use zenoh::payload::StringOrBase64; +use 
zenoh::payload::{Payload, StringOrBase64}; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, Reply}; diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 6a52e3bde2..83f813eb84 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -19,6 +19,7 @@ use crate::api::builders::publication::{ }; use crate::api::encoding::Encoding; use crate::api::key_expr::KeyExpr; +use crate::api::payload::Payload; #[zenoh_macros::unstable] use crate::api::sample::Attachment; use crate::api::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 5f881053a2..bc1cacd769 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -16,6 +16,7 @@ use crate::api::encoding::Encoding; use crate::api::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::api::key_expr::KeyExpr; +use crate::api::payload::Payload; use crate::api::publication::Priority; #[zenoh_macros::unstable] use crate::api::sample::Attachment; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index db41c6a815..3ab8264beb 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -17,6 +17,7 @@ use crate::api::builders::sample::SampleBuilder; use crate::api::encoding::Encoding; use crate::api::handlers::{locked, DefaultHandler, IntoHandler}; use crate::api::key_expr::KeyExpr; +use crate::api::payload::Payload; use crate::api::publication::Priority; use crate::api::sample::QoSBuilder; #[cfg(feature = "unstable")] diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 75f96ea1ed..443ec47545 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -46,7 +46,7 @@ pub(crate) mod common { // pub use crate::api::encoding::Encoding; // pub use crate::api::value::Value; /// The encoding of a zenoh `Value`. 
- pub use crate::payload::{Deserialize, Payload, Serialize}; + // pub use crate::payload::{Deserialize, Payload, Serialize}; #[zenoh_macros::unstable] pub use crate::api::sample::Locality; From 0ededb147fed2bef0baabb146bf765844b0622b2 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 13:30:06 +0200 Subject: [PATCH 152/357] locality commented in prelude --- plugins/zenoh-plugin-storage-manager/src/replica/mod.rs | 1 + zenoh-ext/src/publication_cache.rs | 1 + zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh/src/api/liveliness.rs | 1 + zenoh/src/api/publication.rs | 4 +++- zenoh/src/api/query.rs | 1 + zenoh/src/api/queryable.rs | 1 + zenoh/src/prelude.rs | 6 ++---- zenoh/tests/matching.rs | 6 +++--- 9 files changed, 14 insertions(+), 9 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index c24d9b1fef..7192e3ab7e 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -29,6 +29,7 @@ use urlencoding::encode; use zenoh::key_expr::OwnedKeyExpr; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; +use zenoh::sample::Locality; use zenoh::session::Session; use zenoh::session::SessionDeclarations; use zenoh::time::Timestamp; diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 85d3157d3c..733509f619 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -18,6 +18,7 @@ use std::future::Ready; use zenoh::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; use zenoh::prelude::r#async::*; use zenoh::queryable::{Query, Queryable}; +use zenoh::sample::Locality; use zenoh::selector::Parameters; use zenoh::session::{SessionDeclarations, SessionRef}; use zenoh::subscriber::FlumeSubscriber; diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index e58786628f..23ba054ded 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -21,7 +21,7 @@ use zenoh::handlers::{locked, DefaultHandler, IntoHandler}; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; -use zenoh::sample::SampleBuilder; +use zenoh::sample::{Locality, SampleBuilder}; use zenoh::selector::Selector; use zenoh::session::{SessionDeclarations, SessionRef}; use zenoh::subscriber::{Reliability, Subscriber}; diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 17cb2246b0..5c151315ab 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -17,6 +17,7 @@ //! 
see [`Liveliness`] use crate::api::handlers::IntoHandler; use crate::api::query::{QueryConsolidation, QueryTarget}; +use crate::api::sample::Locality; use crate::api::session::Session; use zenoh_core::Resolve; diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 83f813eb84..3d9df2b5bb 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -22,11 +22,13 @@ use crate::api::key_expr::KeyExpr; use crate::api::payload::Payload; #[zenoh_macros::unstable] use crate::api::sample::Attachment; +use crate::api::sample::Locality; +#[zenoh_macros::unstable] +use crate::api::sample::SourceInfo; use crate::api::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; use crate::api::session::SessionRef; use crate::api::session::Undeclarable; use crate::net::primitives::Primitives; -use crate::prelude::*; #[cfg(feature = "unstable")] use crate::{ api::handlers::{Callback, DefaultHandler, IntoHandler}, diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index bc1cacd769..38e8a12b88 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -20,6 +20,7 @@ use crate::api::payload::Payload; use crate::api::publication::Priority; #[zenoh_macros::unstable] use crate::api::sample::Attachment; +use crate::api::sample::Locality; use crate::api::sample::QoSBuilder; use crate::api::selector::Selector; use crate::api::session::Session; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 3ab8264beb..bec6b5e29b 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -19,6 +19,7 @@ use crate::api::handlers::{locked, DefaultHandler, IntoHandler}; use crate::api::key_expr::KeyExpr; use crate::api::payload::Payload; use crate::api::publication::Priority; +use crate::api::sample::Locality; use crate::api::sample::QoSBuilder; #[cfg(feature = "unstable")] use crate::api::sample::SourceInfo; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 443ec47545..66b1b07adf 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -48,10 +48,8 @@ pub(crate) mod common { /// The encoding of a zenoh `Value`. 
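// Editor's illustration (not part of the patch series): `Locality` is now taken
// from `zenoh::sample` instead of the prelude, which is what the added
// `use zenoh::sample::Locality;` lines in the matching tests are for. A sketch of
// the kind of call site that needs it; `allowed_destination()` is assumed from
// those tests and belongs to the unstable API.

use zenoh::config::Config;
use zenoh::prelude::r#async::*;
use zenoh::sample::Locality;
use zenoh::session::SessionDeclarations;

async fn declare_remote_only_publisher() -> zenoh::Result<()> {
    let session = zenoh::open(Config::default()).res().await?;
    // Restrict this publisher to remote subscribers only.
    let _publisher = session
        .declare_publisher("demo/example/matching")
        .allowed_destination(Locality::Remote)
        .res()
        .await?;
    Ok(())
}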
// pub use crate::payload::{Deserialize, Payload, Serialize}; - #[zenoh_macros::unstable] - pub use crate::api::sample::Locality; - #[cfg(not(feature = "unstable"))] - pub(crate) use crate::api::sample::Locality; + // #[zenoh_macros::unstable] + // pub use crate::api::sample::Locality; #[zenoh_macros::unstable] pub use crate::api::sample::SourceInfo; pub use crate::api::sample::{Sample, SampleKind}; diff --git a/zenoh/tests/matching.rs b/zenoh/tests/matching.rs index 304af977d2..b22d8dd1c8 100644 --- a/zenoh/tests/matching.rs +++ b/zenoh/tests/matching.rs @@ -46,7 +46,7 @@ async fn create_session_pair(locator: &str) -> (Session, Session) { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_matching_status_any() -> Result<()> { use flume::RecvTimeoutError; - use zenoh::session::SessionDeclarations; + use zenoh::{sample::Locality, session::SessionDeclarations}; let (session1, session2) = create_session_pair("tcp/127.0.0.1:18001").await; @@ -108,7 +108,7 @@ async fn zenoh_matching_status_any() -> Result<()> { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_matching_status_remote() -> Result<()> { use flume::RecvTimeoutError; - use zenoh::session::SessionDeclarations; + use zenoh::{sample::Locality, session::SessionDeclarations}; let session1 = ztimeout!(zenoh::open(peer()).res_async()).unwrap(); let session2 = ztimeout!(zenoh::open(peer()).res_async()).unwrap(); @@ -172,7 +172,7 @@ async fn zenoh_matching_status_remote() -> Result<()> { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_matching_status_local() -> Result<()> { use flume::RecvTimeoutError; - use zenoh::session::SessionDeclarations; + use zenoh::{sample::Locality, session::SessionDeclarations}; let session1 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); From dd6c1dd009c9f47d4abb4d7145ca98b93d907743 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 13:40:38 +0200 Subject: [PATCH 153/357] sourceinfo commented out --- zenoh/src/api/liveliness.rs | 1 + zenoh/src/api/query.rs | 1 + zenoh/src/prelude.rs | 4 ++-- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 5c151315ab..ad4cd3f7ec 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -18,6 +18,7 @@ use crate::api::handlers::IntoHandler; use crate::api::query::{QueryConsolidation, QueryTarget}; use crate::api::sample::Locality; +use crate::api::sample::SourceInfo; use crate::api::session::Session; use zenoh_core::Resolve; diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 38e8a12b88..5ef4c14ce7 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -22,6 +22,7 @@ use crate::api::publication::Priority; use crate::api::sample::Attachment; use crate::api::sample::Locality; use crate::api::sample::QoSBuilder; +use crate::api::sample::SourceInfo; use crate::api::selector::Selector; use crate::api::session::Session; use crate::api::value::Value; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 66b1b07adf..77f4087d3f 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -50,8 +50,8 @@ pub(crate) mod common { // #[zenoh_macros::unstable] // pub use crate::api::sample::Locality; - #[zenoh_macros::unstable] - pub use crate::api::sample::SourceInfo; + // #[zenoh_macros::unstable] + // pub use crate::api::sample::SourceInfo; pub use crate::api::sample::{Sample, SampleKind}; pub use crate::api::publication::Priority; From 
9c1a1f9a0be7adfbe92adb96fa225c2f69e47c66 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 14:31:05 +0200 Subject: [PATCH 154/357] relative use --- zenoh/src/api/admin.rs | 12 ++-- zenoh/src/api/encoding.rs | 2 +- zenoh/src/api/handlers.rs | 2 +- zenoh/src/api/info.rs | 2 +- zenoh/src/api/key_expr.rs | 10 +-- zenoh/src/api/liveliness.rs | 45 ++++--------- zenoh/src/api/plugins.rs | 4 +- zenoh/src/api/publication.rs | 64 +++++++++--------- zenoh/src/api/query.rs | 35 +++++----- zenoh/src/api/queryable.rs | 54 +++++++-------- zenoh/src/api/sample.rs | 25 ++++--- zenoh/src/api/scouting.rs | 8 +-- zenoh/src/api/selector.rs | 10 ++- zenoh/src/api/session.rs | 125 ++++++++++++++--------------------- zenoh/src/api/subscriber.rs | 31 ++++----- zenoh/src/api/time.rs | 1 - zenoh/src/api/value.rs | 3 +- zenoh/src/lib.rs | 10 ++- 18 files changed, 200 insertions(+), 243 deletions(-) diff --git a/zenoh/src/api/admin.rs b/zenoh/src/api/admin.rs index 917afdc18f..74c913b419 100644 --- a/zenoh/src/api/admin.rs +++ b/zenoh/src/api/admin.rs @@ -11,10 +11,14 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::api::sample::Locality; -use crate::{ - api::encoding::Encoding, api::key_expr::KeyExpr, api::payload::Payload, api::queryable::Query, - api::sample::DataInfo, api::sample::SampleKind, api::session::Session, +use super::{ + encoding::Encoding, + key_expr::KeyExpr, + payload::Payload, + queryable::Query, + sample::Locality, + sample::{DataInfo, SampleKind}, + session::Session, }; use std::{ collections::hash_map::DefaultHasher, diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs index d9fa725ed5..7518671eed 100644 --- a/zenoh/src/api/encoding.rs +++ b/zenoh/src/api/encoding.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::payload::Payload; +use super::payload::Payload; use phf::phf_map; use std::{borrow::Cow, convert::Infallible, fmt, str::FromStr}; use zenoh_buffers::{ZBuf, ZSlice}; diff --git a/zenoh/src/api/handlers.rs b/zenoh/src/api/handlers.rs index 6aecda34b9..7610fe43d8 100644 --- a/zenoh/src/api/handlers.rs +++ b/zenoh/src/api/handlers.rs @@ -13,7 +13,7 @@ // //! Callback handler trait. -use crate::api::session::API_DATA_RECEPTION_CHANNEL_SIZE; +use super::session::API_DATA_RECEPTION_CHANNEL_SIZE; use std::sync::{Arc, Mutex, Weak}; use zenoh_collections::RingBuffer as RingBufferInner; use zenoh_result::ZResult; diff --git a/zenoh/src/api/info.rs b/zenoh/src/api/info.rs index 1f7a903ba4..dbcad9c50c 100644 --- a/zenoh/src/api/info.rs +++ b/zenoh/src/api/info.rs @@ -13,7 +13,7 @@ // //! Tools to access information about the current zenoh [`Session`](crate::Session). -use crate::api::session::SessionRef; +use super::session::SessionRef; use std::future::Ready; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_protocol::core::{WhatAmI, ZenohId}; diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index 1f381486a0..0eb7515181 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -43,6 +43,11 @@ //! [`kedefine`] also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, //! as the [`keformat`] and [`kewrite`] macros will be able to tell you if you're attempting to set fields of the format that do not exist. 
+use super::{ + selector::Selector, + session::{Session, Undeclarable}, +}; +use crate::net::primitives::Primitives; use std::{ convert::{TryFrom, TryInto}, future::Ready, @@ -56,11 +61,6 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; -use crate::{ - api::selector::Selector, api::session::Session, api::session::Undeclarable, - net::primitives::Primitives, -}; - #[derive(Clone, Debug)] pub(crate) enum KeyExprInner<'a> { Borrowed(&'a keyexpr), diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index ad4cd3f7ec..c2a075ee52 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -15,38 +15,21 @@ //! Liveliness primitives. //! //! see [`Liveliness`] -use crate::api::handlers::IntoHandler; -use crate::api::query::{QueryConsolidation, QueryTarget}; -use crate::api::sample::Locality; -use crate::api::sample::SourceInfo; -use crate::api::session::Session; -use zenoh_core::Resolve; - -#[zenoh_macros::unstable] -use { - crate::{ - api::handlers::locked, - api::handlers::DefaultHandler, - api::key_expr::KeyExpr, - api::session::SessionRef, - api::session::Undeclarable, - api::subscriber::{Subscriber, SubscriberInner}, - prelude::*, - }, - crate::{api::query::Reply, api::Id}, - std::convert::TryInto, - std::future::Ready, - std::sync::Arc, - std::time::Duration, - zenoh_config::unwrap_or_default, - zenoh_core::AsyncResolve, - zenoh_core::Resolvable, - zenoh_core::Result as ZResult, - zenoh_core::SyncResolve, - zenoh_keyexpr::keyexpr, - zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo, - zenoh_protocol::network::request, +use super::{ + handlers::{locked, DefaultHandler, IntoHandler}, + key_expr::KeyExpr, + query::{QueryConsolidation, QueryTarget, Reply}, + sample::{Locality, Sample, SourceInfo}, + session::{Session, SessionRef, Undeclarable}, + subscriber::{Subscriber, SubscriberInner}, + Id, }; +use std::{convert::TryInto, future::Ready, sync::Arc, time::Duration}; +use zenoh_config::unwrap_or_default; +use zenoh_core::Resolve; +use zenoh_core::{AsyncResolve, Resolvable, Result as ZResult, SyncResolve}; +use zenoh_keyexpr::keyexpr; +use zenoh_protocol::network::{declare::subscriber::ext::SubscriberInfo, request}; #[zenoh_macros::unstable] pub(crate) static PREFIX_LIVELINESS: &str = crate::net::routing::PREFIX_LIVELINESS; diff --git a/zenoh/src/api/plugins.rs b/zenoh/src/api/plugins.rs index 36cde5ba34..23f8b2b811 100644 --- a/zenoh/src/api/plugins.rs +++ b/zenoh/src/api/plugins.rs @@ -14,9 +14,9 @@ //! `zenohd`'s plugin system. For more details, consult the [detailed documentation](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Plugins/Zenoh%20Plugins.md). -use crate::{api::selector::Selector, net::runtime::Runtime}; +use super::selector::Selector; +use crate::net::runtime::Runtime; use zenoh_core::zconfigurable; - use zenoh_plugin_trait::{ Plugin, PluginControl, PluginInstance, PluginReport, PluginStatusRec, StructVersion, }; diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 3d9df2b5bb..87ba666dda 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -13,46 +13,42 @@ // //! Publishing primitives. 
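// Editor's note on the "relative use" refactoring applied throughout
// zenoh/src/api in this patch: absolute `crate::api::...` paths inside the api
// modules are replaced by grouped `super::{...}` imports, for example:
//
//     use crate::api::encoding::Encoding;   // before
//     use crate::api::key_expr::KeyExpr;
//
//     use super::{encoding::Encoding, key_expr::KeyExpr};   // after
//
// Callers outside the crate keep importing through the public `zenoh::...`
// paths; only the crate-internal spelling changes here.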
-use crate::api::builders::publication::{ - PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, PublisherDeleteBuilder, - PublisherPutBuilder, +use super::{ + builders::publication::{ + PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, + PublisherDeleteBuilder, PublisherPutBuilder, + }, + encoding::Encoding, + key_expr::KeyExpr, + payload::Payload, + sample::{DataInfo, Locality, QoS, Sample, SampleFields, SampleKind}, + session::{SessionRef, Undeclarable}, }; -use crate::api::encoding::Encoding; -use crate::api::key_expr::KeyExpr; -use crate::api::payload::Payload; -#[zenoh_macros::unstable] -use crate::api::sample::Attachment; -use crate::api::sample::Locality; -#[zenoh_macros::unstable] -use crate::api::sample::SourceInfo; -use crate::api::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; -use crate::api::session::SessionRef; -use crate::api::session::Undeclarable; use crate::net::primitives::Primitives; -#[cfg(feature = "unstable")] -use crate::{ - api::handlers::{Callback, DefaultHandler, IntoHandler}, - api::Id, -}; use futures::Sink; -use std::convert::TryFrom; -use std::future::Ready; -use std::pin::Pin; -use std::task::{Context, Poll}; +use std::{ + convert::TryFrom, + future::Ready, + pin::Pin, + task::{Context, Poll}, +}; use zenoh_core::{zread, AsyncResolve, Resolvable, Resolve, SyncResolve}; use zenoh_keyexpr::keyexpr; -pub use zenoh_protocol::core::CongestionControl; -#[zenoh_macros::unstable] -use zenoh_protocol::core::EntityGlobalId; +use zenoh_protocol::{ + core::CongestionControl, + network::{push::ext, Push}, + zenoh::{Del, PushBody, Put}, +}; +use zenoh_result::{Error, ZResult}; + #[zenoh_macros::unstable] -use zenoh_protocol::core::EntityId; -use zenoh_protocol::network::push::ext; -use zenoh_protocol::network::Push; -use zenoh_protocol::zenoh::Del; -use zenoh_protocol::zenoh::PushBody; -use zenoh_protocol::zenoh::Put; -use zenoh_result::Error; -use zenoh_result::ZResult; +use { + crate::api::handlers::{Callback, DefaultHandler, IntoHandler}, + crate::api::sample::{Attachment, SourceInfo}, + crate::api::Id, + zenoh_protocol::core::EntityGlobalId, + zenoh_protocol::core::EntityId, +}; #[zenoh_macros::unstable] #[derive(Clone)] diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 5ef4c14ce7..567bfe2c64 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -13,28 +13,27 @@ // //! Query primitives. 
-use crate::api::encoding::Encoding; -use crate::api::handlers::{locked, Callback, DefaultHandler, IntoHandler}; -use crate::api::key_expr::KeyExpr; -use crate::api::payload::Payload; -use crate::api::publication::Priority; -#[zenoh_macros::unstable] -use crate::api::sample::Attachment; -use crate::api::sample::Locality; -use crate::api::sample::QoSBuilder; -use crate::api::sample::SourceInfo; -use crate::api::selector::Selector; -use crate::api::session::Session; -use crate::api::value::Value; -use crate::prelude::*; -use std::collections::HashMap; -use std::future::Ready; -use std::time::Duration; +use super::{ + builders::sample::{QoSBuilderTrait, SampleBuilderTrait, ValueBuilderTrait}, + encoding::Encoding, + handlers::{locked, Callback, DefaultHandler, IntoHandler}, + key_expr::KeyExpr, + payload::Payload, + publication::Priority, + sample::{Locality, QoSBuilder, Sample, SourceInfo}, + selector::Selector, + session::Session, + value::Value, +}; +use std::{collections::HashMap, future::Ready, time::Duration}; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_keyexpr::OwnedKeyExpr; -use zenoh_protocol::core::ZenohId; +use zenoh_protocol::core::{CongestionControl, ZenohId}; use zenoh_result::ZResult; +#[zenoh_macros::unstable] +use crate::api::sample::Attachment; + /// The [`Queryable`](crate::queryable::Queryable)s that should be target of a [`get`](Session::get). pub type QueryTarget = zenoh_protocol::network::request::ext::TargetType; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index bec6b5e29b..a60adfe74f 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -13,42 +13,42 @@ // //! Queryable primitives. -use crate::api::builders::sample::SampleBuilder; -use crate::api::encoding::Encoding; -use crate::api::handlers::{locked, DefaultHandler, IntoHandler}; -use crate::api::key_expr::KeyExpr; -use crate::api::payload::Payload; -use crate::api::publication::Priority; -use crate::api::sample::Locality; -use crate::api::sample::QoSBuilder; -#[cfg(feature = "unstable")] -use crate::api::sample::SourceInfo; -use crate::api::selector::Parameters; -use crate::api::selector::Selector; -use crate::api::session::SessionRef; -use crate::api::session::Undeclarable; -use crate::api::value::Value; -use crate::api::Id; +use super::{ + builders::sample::{ + QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, TimestampBuilderTrait, + ValueBuilderTrait, + }, + encoding::Encoding, + handlers::{locked, DefaultHandler, IntoHandler}, + key_expr::KeyExpr, + payload::Payload, + publication::Priority, + sample::{Locality, QoSBuilder, Sample, SampleKind}, + selector::{Parameters, Selector}, + session::{SessionRef, Undeclarable}, + value::Value, + Id, +}; use crate::net::primitives::Primitives; -use crate::prelude::*; -#[cfg(feature = "unstable")] -use crate::{api::query::ReplyKeyExpr, api::sample::Attachment}; -use std::fmt; -use std::future::Ready; -use std::ops::Deref; -use std::sync::Arc; +use std::{fmt, future::Ready, ops::Deref, sync::Arc}; use uhlc::Timestamp; use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; -#[zenoh_macros::unstable] -use zenoh_protocol::core::EntityGlobalId; -use zenoh_protocol::core::ZenohId; use zenoh_protocol::{ - core::{EntityId, WireExpr}, + core::{CongestionControl, EntityId, WireExpr, ZenohId}, network::{response, Mapping, RequestId, Response, ResponseFinal}, zenoh::{self, reply::ReplyBody, Del, Put, ResponseBody}, }; use zenoh_result::ZResult; +#[zenoh_macros::unstable] +use { + super::{ + 
query::ReplyKeyExpr, + sample::{Attachment, SourceInfo}, + }, + zenoh_protocol::core::EntityGlobalId, +}; + pub(crate) struct QueryInner { /// The key expression of this Query. pub(crate) key_expr: KeyExpr<'static>, diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 359af2a436..a51264e1a4 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -13,21 +13,24 @@ // //! Sample primitives -use crate::api::builders::sample::{QoSBuilderTrait, ValueBuilderTrait}; -use crate::api::encoding::Encoding; -use crate::api::key_expr::KeyExpr; -use crate::api::publication::Priority; -use crate::api::value::Value; -use crate::payload::Payload; +use super::{ + builders::sample::{QoSBuilderTrait, ValueBuilderTrait}, + encoding::Encoding, + key_expr::KeyExpr, + payload::Payload, + publication::Priority, + value::Value, +}; +use std::{convert::TryFrom, fmt}; +use zenoh_protocol::{ + core::{CongestionControl, EntityGlobalId, Timestamp}, + network::declare::ext::QoSType, +}; + #[zenoh_macros::unstable] pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; #[zenoh_macros::unstable] use serde::Serialize; -use std::{convert::TryFrom, fmt}; -use zenoh_protocol::core::CongestionControl; -use zenoh_protocol::core::EntityGlobalId; -use zenoh_protocol::core::Timestamp; -use zenoh_protocol::network::declare::ext::QoSType; pub type SourceSn = u64; diff --git a/zenoh/src/api/scouting.rs b/zenoh/src/api/scouting.rs index bcc1482f1b..c15e9955a3 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -17,15 +17,9 @@ use futures::StreamExt; use std::{fmt, future::Ready, net::SocketAddr, ops::Deref}; use tokio::net::UdpSocket; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; -use zenoh_protocol::core::WhatAmIMatcher; +use zenoh_protocol::{core::WhatAmIMatcher, scouting::Hello}; use zenoh_result::ZResult; -/// Constants and helpers for zenoh `whatami` flags. -pub use zenoh_protocol::core::WhatAmI; - -/// A zenoh Hello message. -pub use zenoh_protocol::scouting::Hello; - /// A builder for initializing a [`Scout`]. /// /// # Examples diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index 51b8296634..144b4ee8a0 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -14,12 +14,7 @@ //! 
[Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries -use zenoh_protocol::core::key_expr::{keyexpr, OwnedKeyExpr}; -use zenoh_result::ZResult; -pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; - -use crate::{api::key_expr::KeyExpr, api::queryable::Query}; - +use super::{key_expr::KeyExpr, queryable::Query}; use std::{ borrow::{Borrow, Cow}, collections::HashMap, @@ -27,6 +22,9 @@ use std::{ hash::Hash, str::FromStr, }; +use zenoh_protocol::core::key_expr::{keyexpr, OwnedKeyExpr}; +use zenoh_result::ZResult; +pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; /// A selector is the combination of a [Key Expression](crate::prelude::KeyExpr), which defines the /// set of keys that are relevant to an operation, and a set of parameters diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index ae0593790e..880bf4405f 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -11,85 +11,50 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::api::admin; -use crate::api::builders::publication::PublicationBuilder; -use crate::api::builders::publication::PublicationBuilderDelete; -use crate::api::builders::publication::PublicationBuilderPut; -use crate::api::encoding::Encoding; -use crate::api::handlers::{Callback, DefaultHandler}; -use crate::api::info::SessionInfo; -use crate::api::key_expr::KeyExpr; -use crate::api::key_expr::KeyExprInner; -#[zenoh_macros::unstable] -use crate::api::liveliness::{Liveliness, LivelinessTokenState}; -use crate::api::payload::Payload; -#[zenoh_macros::unstable] -use crate::api::publication::MatchingListenerState; -#[zenoh_macros::unstable] -use crate::api::publication::MatchingStatus; -use crate::api::publication::Priority; -use crate::api::query::GetBuilder; -use crate::api::query::QueryState; -use crate::api::query::Reply; -use crate::api::queryable::Query; -use crate::api::queryable::QueryInner; -use crate::api::queryable::QueryableState; -#[cfg(feature = "unstable")] -use crate::api::sample::Attachment; -use crate::api::sample::DataInfo; -use crate::api::sample::DataInfoIntoSample; -use crate::api::sample::Locality; -use crate::api::sample::QoS; -use crate::api::sample::Sample; -use crate::api::sample::SampleKind; -#[cfg(feature = "unstable")] -use crate::api::sample::SourceInfo; -use crate::api::selector::Parameters; -use crate::api::selector::Selector; -use crate::api::selector::TIME_RANGE_KEY; -use crate::api::subscriber::SubscriberBuilder; -use crate::api::subscriber::SubscriberState; -use crate::api::value::Value; -use crate::api::Id; -use crate::net::primitives::Primitives; -use crate::net::routing::dispatcher::face::Face; -use crate::net::runtime::Runtime; -use crate::publication::*; -use crate::query::*; -use crate::queryable::*; +use super::{ + admin, + builders::publication::{ + PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, PublisherBuilder, + }, + encoding::Encoding, + handlers::{Callback, DefaultHandler}, + info::SessionInfo, + key_expr::{KeyExpr, KeyExprInner}, + payload::Payload, + publication::{Priority, Publisher}, + query::{ConsolidationMode, GetBuilder, QueryConsolidation, QueryState, QueryTarget, Reply}, + queryable::{Query, QueryInner, QueryableBuilder, QueryableState}, + sample::{DataInfo, DataInfoIntoSample, Locality, QoS, Sample, SampleKind}, + selector::{Parameters, Selector, TIME_RANGE_KEY}, + subscriber::{SubscriberBuilder, SubscriberState}, + value::Value, + Id, +}; +use 
crate::net::{primitives::Primitives, routing::dispatcher::face::Face, runtime::Runtime}; use log::{error, trace, warn}; -use std::collections::HashMap; -use std::convert::TryFrom; -use std::convert::TryInto; -use std::fmt; -use std::future::Ready; -use std::ops::Deref; -use std::sync::atomic::{AtomicU16, Ordering}; -use std::sync::Arc; -use std::sync::RwLock; -use std::time::Duration; +use std::{ + collections::HashMap, + convert::{TryFrom, TryInto}, + fmt, + future::Ready, + ops::Deref, + sync::{ + atomic::{AtomicU16, Ordering}, + Arc, RwLock, + }, + time::Duration, +}; use uhlc::HLC; use zenoh_buffers::ZBuf; use zenoh_collections::SingleOrVec; -use zenoh_config::unwrap_or_default; -use zenoh_config::Config; -use zenoh_config::Notifier; -use zenoh_core::Resolvable; -use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; -use zenoh_protocol::core::Reliability; -#[cfg(feature = "unstable")] -use zenoh_protocol::network::declare::SubscriberId; -#[cfg(feature = "unstable")] -use zenoh_protocol::network::ext; -use zenoh_protocol::network::AtomicRequestId; -use zenoh_protocol::network::RequestId; -use zenoh_protocol::zenoh::reply::ReplyBody; -use zenoh_protocol::zenoh::Del; -use zenoh_protocol::zenoh::Put; +use zenoh_config::{unwrap_or_default, Config, Notifier}; +use zenoh_core::{ + zconfigurable, zread, Resolvable, Resolve, ResolveClosure, ResolveFuture, SyncResolve, +}; use zenoh_protocol::{ core::{ key_expr::{keyexpr, OwnedKeyExpr}, - AtomicExprId, CongestionControl, ExprId, WireExpr, ZenohId, EMPTY_EXPR_ID, + AtomicExprId, CongestionControl, ExprId, Reliability, WireExpr, ZenohId, EMPTY_EXPR_ID, }, network::{ declare::{ @@ -98,16 +63,28 @@ use zenoh_protocol::{ DeclareQueryable, DeclareSubscriber, UndeclareQueryable, UndeclareSubscriber, }, request::{self, ext::TargetType, Request}, - Mapping, Push, Response, ResponseFinal, + AtomicRequestId, Mapping, Push, RequestId, Response, ResponseFinal, }, zenoh::{ query::{self, ext::QueryBodyType, Consolidation}, - PushBody, RequestBody, ResponseBody, + reply::ReplyBody, + Del, PushBody, Put, RequestBody, ResponseBody, }, }; use zenoh_result::ZResult; use zenoh_util::core::AsyncResolve; +#[zenoh_macros::unstable] +use { + super::{ + liveliness::{Liveliness, LivelinessTokenState}, + publication::{MatchingListenerState, MatchingStatus}, + sample::{Attachment, SourceInfo}, + }, + zenoh_protocol::network::declare::SubscriberId, + zenoh_protocol::network::ext, +}; + zconfigurable! { pub(crate) static ref API_DATA_RECEPTION_CHANNEL_SIZE: usize = 256; pub(crate) static ref API_QUERY_RECEPTION_CHANNEL_SIZE: usize = 256; diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index 34df5569f4..7ad0160ae3 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -13,24 +13,25 @@ // //! Subscribing primitives. 
-use crate::api::handlers::{locked, Callback, DefaultHandler, IntoHandler}; -use crate::api::key_expr::KeyExpr; -use crate::api::sample::Locality; -use crate::api::sample::Sample; -use crate::api::session::Undeclarable; -use crate::api::Id; -use crate::{api::session::SessionRef, Result as ZResult}; -use std::fmt; -use std::future::Ready; -use std::ops::{Deref, DerefMut}; -use std::sync::Arc; +use super::{ + handlers::{locked, Callback, DefaultHandler, IntoHandler}, + key_expr::KeyExpr, + sample::{Locality, Sample}, + session::{SessionRef, Undeclarable}, + Id, +}; +use std::{ + fmt, + future::Ready, + ops::{Deref, DerefMut}, + sync::Arc, +}; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; +use zenoh_protocol::{core::Reliability, network::declare::subscriber::ext::SubscriberInfo}; +use zenoh_result::ZResult; + #[cfg(feature = "unstable")] use zenoh_protocol::core::EntityGlobalId; -use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; - -/// The kind of reliability. -pub use zenoh_protocol::core::Reliability; pub(crate) struct SubscriberState { pub(crate) id: Id, diff --git a/zenoh/src/api/time.rs b/zenoh/src/api/time.rs index cbdabe3a7e..5d0d06765d 100644 --- a/zenoh/src/api/time.rs +++ b/zenoh/src/api/time.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // use std::convert::TryFrom; - use zenoh_protocol::core::{Timestamp, TimestampId}; /// Generates a reception [`Timestamp`] with id=0x01. diff --git a/zenoh/src/api/value.rs b/zenoh/src/api/value.rs index f75abd4241..60586ad040 100644 --- a/zenoh/src/api/value.rs +++ b/zenoh/src/api/value.rs @@ -13,8 +13,7 @@ // //! Value primitives. -use crate::api::builders::sample::ValueBuilderTrait; -use crate::{api::encoding::Encoding, payload::Payload}; +use super::{builders::sample::ValueBuilderTrait, encoding::Encoding, payload::Payload}; /// A zenoh [`Value`] contains a `payload` and an [`Encoding`] that indicates how the [`Payload`] should be interpreted. #[non_exhaustive] diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index c6b06259ec..c6e890cb27 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -196,18 +196,19 @@ pub mod selector { pub mod subscriber { pub use crate::api::subscriber::FlumeSubscriber; - pub use crate::api::subscriber::Reliability; pub use crate::api::subscriber::Subscriber; pub use crate::api::subscriber::SubscriberBuilder; + /// The kind of reliability. + pub use zenoh_protocol::core::Reliability; } pub mod publication { pub use crate::api::builders::publication::PublisherBuilder; - pub use crate::api::publication::CongestionControl; pub use crate::api::publication::Priority; pub use crate::api::publication::Publisher; #[zenoh_macros::unstable] pub use crate::api::publication::PublisherDeclarations; + pub use zenoh_protocol::core::CongestionControl; } pub mod query { @@ -236,7 +237,10 @@ pub mod handlers { pub mod scouting { pub use crate::api::scouting::scout; pub use crate::api::scouting::ScoutBuilder; - pub use crate::api::scouting::WhatAmI; + /// Constants and helpers for zenoh `whatami` flags. + pub use zenoh_protocol::core::WhatAmI; + /// A zenoh Hello message. 
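// Editor's illustration (not part of the patch series): with `Reliability`,
// `CongestionControl`, `WhatAmI` and `Hello` now re-exported directly from
// `zenoh_protocol` on the public modules, application code imports them from
// those modules explicitly, as the updated tests and examples do. A minimal
// sketch, assuming the `QoSBuilderTrait` builder methods of this era.

use zenoh::config::Config;
use zenoh::prelude::sync::*;
use zenoh::publication::{CongestionControl, Priority};
use zenoh::sample::QoSBuilderTrait;
use zenoh::session::SessionDeclarations;
use zenoh::subscriber::Reliability;

fn main() -> zenoh::Result<()> {
    let session = zenoh::open(Config::default()).res()?;
    let _publisher = session
        .declare_publisher("demo/example/qos")
        .congestion_control(CongestionControl::Block)
        .priority(Priority::InteractiveHigh)
        .res()?;
    let _subscriber = session
        .declare_subscriber("demo/example/qos")
        .reliability(Reliability::Reliable)
        .res()?;
    Ok(())
}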
+ pub use zenoh_protocol::scouting::Hello; } #[cfg(feature = "unstable")] From d0f87047d943b02138467f9ac7e9531bb20d301e Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 14:38:39 +0200 Subject: [PATCH 155/357] sample commented in prelude --- examples/examples/z_storage.rs | 1 + plugins/zenoh-plugin-example/src/lib.rs | 1 + plugins/zenoh-plugin-rest/src/lib.rs | 1 + .../src/replica/align_queryable.rs | 1 + plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs | 2 +- plugins/zenoh-plugin-storage-manager/tests/operations.rs | 1 + plugins/zenoh-plugin-storage-manager/tests/wildcard.rs | 1 + zenoh-ext/src/publication_cache.rs | 3 +-- zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh-ext/src/subscriber_ext.rs | 2 +- zenoh/src/api/publication.rs | 6 +++--- zenoh/src/prelude.rs | 3 +-- zenoh/tests/events.rs | 1 + zenoh/tests/liveliness.rs | 2 +- zenoh/tests/session.rs | 1 + 15 files changed, 17 insertions(+), 11 deletions(-) diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index 4c6ed0ede5..07f54ce5ff 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -19,6 +19,7 @@ use std::collections::HashMap; use zenoh::config::Config; use zenoh::key_expr::{keyexpr, KeyExpr}; use zenoh::prelude::r#async::*; +use zenoh::sample::{Sample, SampleKind}; use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index a1e2af6574..6e9d0a917d 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -26,6 +26,7 @@ use zenoh::key_expr::{keyexpr, KeyExpr}; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::runtime::Runtime; +use zenoh::sample::Sample; use zenoh::session::SessionDeclarations; use zenoh_core::zlock; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 83085fd449..6465d74efd 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -36,6 +36,7 @@ use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; +use zenoh::sample::{Sample, SampleKind}; use zenoh::selector::{Parameters, Selector, TIME_RANGE_KEY}; use zenoh::session::{Session, SessionDeclarations}; use zenoh::value::Value; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 00d198bef6..e33b0c519d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -21,6 +21,7 @@ use std::str::FromStr; use zenoh::key_expr::OwnedKeyExpr; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; +use zenoh::sample::Sample; use zenoh::selector::Selector; use zenoh::session::Session; use zenoh::session::SessionDeclarations; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 0d750c8810..466b415d73 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -21,7 +21,7 @@ use std::str; use zenoh::key_expr::{KeyExpr, OwnedKeyExpr}; use 
zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; -use zenoh::sample::SampleBuilder; +use zenoh::sample::{Sample, SampleBuilder}; use zenoh::session::Session; use zenoh::time::Timestamp; use zenoh::value::Value; diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index 1d16ec23ea..bd612bec87 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -24,6 +24,7 @@ use zenoh::config::{Config, ValidatedMap}; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::query::Reply; +use zenoh::sample::Sample; use zenoh::session::Session; use zenoh::time::Timestamp; use zenoh_core::zasync_executor_init; diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index 4d8e72d55f..054be54ce8 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -25,6 +25,7 @@ use zenoh::config::{Config, ValidatedMap}; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::query::Reply; +use zenoh::sample::Sample; use zenoh::session::Session; use zenoh::time::Timestamp; use zenoh_core::zasync_executor_init; diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 733509f619..25311ca647 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -16,9 +16,8 @@ use std::collections::{HashMap, VecDeque}; use std::convert::TryInto; use std::future::Ready; use zenoh::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; -use zenoh::prelude::r#async::*; use zenoh::queryable::{Query, Queryable}; -use zenoh::sample::Locality; +use zenoh::sample::{Locality, Sample}; use zenoh::selector::Parameters; use zenoh::session::{SessionDeclarations, SessionRef}; use zenoh::subscriber::FlumeSubscriber; diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 23ba054ded..34b0ee9bb5 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -21,7 +21,7 @@ use zenoh::handlers::{locked, DefaultHandler, IntoHandler}; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; -use zenoh::sample::{Locality, SampleBuilder}; +use zenoh::sample::{Locality, Sample, SampleBuilder}; use zenoh::selector::Selector; use zenoh::session::{SessionDeclarations, SessionRef}; use zenoh::subscriber::{Reliability, Subscriber}; diff --git a/zenoh-ext/src/subscriber_ext.rs b/zenoh-ext/src/subscriber_ext.rs index 7d77fac05b..c758f910c2 100644 --- a/zenoh-ext/src/subscriber_ext.rs +++ b/zenoh-ext/src/subscriber_ext.rs @@ -19,8 +19,8 @@ use zenoh::sample::Locality; use zenoh::Result as ZResult; use zenoh::{ liveliness::LivelinessSubscriberBuilder, - prelude::Sample, query::{QueryConsolidation, QueryTarget}, + sample::Sample, subscriber::{Reliability, Subscriber, SubscriberBuilder}, }; diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 87ba666dda..2c3d21eec4 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -1090,9 +1090,9 @@ impl Drop for MatchingListenerInner<'_> { #[cfg(test)] mod tests { + use crate::api::{sample::SampleKind, session::SessionDeclarations}; use zenoh_config::Config; - - use crate::api::session::SessionDeclarations; + use 
zenoh_core::SyncResolve; #[test] fn priority_from() { @@ -1120,7 +1120,7 @@ mod tests { #[test] fn sample_kind_integrity_in_publication() { - use crate::{api::session::open, prelude::sync::*}; + use crate::api::session::open; const KEY_EXPR: &str = "test/sample_kind_integrity/publication"; const VALUE: &str = "zenoh"; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 77f4087d3f..0ca952bc16 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -52,8 +52,7 @@ pub(crate) mod common { // pub use crate::api::sample::Locality; // #[zenoh_macros::unstable] // pub use crate::api::sample::SourceInfo; - pub use crate::api::sample::{Sample, SampleKind}; - + // pub use crate::api::sample::{Sample, SampleKind}; pub use crate::api::publication::Priority; #[zenoh_macros::unstable] pub use crate::api::publication::PublisherDeclarations; diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index aafdfdf7d5..8b5c79bb97 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -14,6 +14,7 @@ use std::time::Duration; use zenoh::prelude::r#async::*; use zenoh::query::Reply; +use zenoh::sample::SampleKind; use zenoh::session::{Session, SessionDeclarations}; use zenoh_config::peer; use zenoh_core::ztimeout; diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index 0af0b64164..b672227ab9 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -22,7 +22,7 @@ const SLEEP: Duration = Duration::from_secs(1); #[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_liveliness() { - use zenoh::session::SessionDeclarations; + use zenoh::{sample::SampleKind, session::SessionDeclarations}; let mut c1 = config::peer(); c1.listen diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index f36775bca2..44354b5ddb 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -16,6 +16,7 @@ use std::sync::Arc; use std::time::Duration; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; +use zenoh::sample::SampleKind; use zenoh::session::{Session, SessionDeclarations}; use zenoh::value::Value; use zenoh_config as config; From 118b8c08ea74d44c4725d6eee550237fc9861446 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 14:39:55 +0200 Subject: [PATCH 156/357] priority commented in prelude --- examples/examples/z_pub_thr.rs | 2 +- zenoh-ext/src/group.rs | 1 + zenoh/src/prelude.rs | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 75e2d72fbd..8450855956 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -16,7 +16,7 @@ use clap::Parser; use std::convert::TryInto; use zenoh::payload::Payload; use zenoh::prelude::sync::*; -use zenoh::publication::CongestionControl; +use zenoh::publication::{CongestionControl, Priority}; use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 5e9b9e66f3..60c55fdc2f 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -30,6 +30,7 @@ use zenoh::key_expr::KeyExpr; use zenoh::key_expr::OwnedKeyExpr; use zenoh::payload::PayloadReader; use zenoh::prelude::r#async::*; +use zenoh::publication::Priority; use zenoh::publication::Publisher; use zenoh::query::ConsolidationMode; use zenoh::session::Session; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 0ca952bc16..242476898a 100644 --- a/zenoh/src/prelude.rs +++ 
b/zenoh/src/prelude.rs @@ -53,7 +53,7 @@ pub(crate) mod common { // #[zenoh_macros::unstable] // pub use crate::api::sample::SourceInfo; // pub use crate::api::sample::{Sample, SampleKind}; - pub use crate::api::publication::Priority; + // pub use crate::api::publication::Priority; #[zenoh_macros::unstable] pub use crate::api::publication::PublisherDeclarations; pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; From 0406c5f60bf2c9dc18898c935493532d693640e2 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 14:43:41 +0200 Subject: [PATCH 157/357] congestion control commented in prelude --- zenoh/src/prelude.rs | 7 +++---- zenoh/tests/qos.rs | 1 + zenoh/tests/routing.rs | 1 + zenoh/tests/session.rs | 2 ++ zenoh/tests/unicity.rs | 2 ++ 5 files changed, 9 insertions(+), 4 deletions(-) diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 242476898a..76e41fe579 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -54,10 +54,9 @@ pub(crate) mod common { // pub use crate::api::sample::SourceInfo; // pub use crate::api::sample::{Sample, SampleKind}; // pub use crate::api::publication::Priority; - #[zenoh_macros::unstable] - pub use crate::api::publication::PublisherDeclarations; - pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; - + // #[zenoh_macros::unstable] + // pub use crate::api::publication::PublisherDeclarations; + // pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; pub use crate::api::builders::sample::{ QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, }; diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index b9f3ab3945..e11fcf4e22 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -13,6 +13,7 @@ // use std::time::Duration; use zenoh::prelude::r#async::*; +use zenoh::publication::CongestionControl; use zenoh::sample::QoSBuilderTrait; use zenoh::{publication::Priority, session::SessionDeclarations}; use zenoh_core::ztimeout; diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index b0d789312a..98c2f002f7 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -18,6 +18,7 @@ use std::time::Duration; use tokio_util::{sync::CancellationToken, task::TaskTracker}; use zenoh::config::{Config, ModeDependentValue}; use zenoh::prelude::r#async::*; +use zenoh::publication::CongestionControl; use zenoh::sample::QoSBuilderTrait; use zenoh::session::{Session, SessionDeclarations}; use zenoh::Result; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 44354b5ddb..313b30e141 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -16,8 +16,10 @@ use std::sync::Arc; use std::time::Duration; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; +use zenoh::publication::CongestionControl; use zenoh::sample::SampleKind; use zenoh::session::{Session, SessionDeclarations}; +use zenoh::subscriber::Reliability; use zenoh::value::Value; use zenoh_config as config; use zenoh_core::ztimeout; diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index f37f6cb852..1c76b1f8fc 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -11,6 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // +use config::WhatAmI; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; @@ -18,6 +19,7 @@ use tokio::runtime::Handle; use zenoh::config::EndPoint; use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; +use zenoh::publication::CongestionControl; use zenoh::session::{Session, 
SessionDeclarations}; use zenoh_config as config; use zenoh_core::ztimeout; From 77b5759e0866ad301b338ff88bb187e905ddff31 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 15:14:54 +0200 Subject: [PATCH 158/357] builder traits removed from prelude --- examples/examples/z_get.rs | 1 + examples/examples/z_ping.rs | 1 + examples/examples/z_pong.rs | 1 + examples/examples/z_pub_thr.rs | 1 + plugins/zenoh-plugin-rest/examples/z_serve_sse.rs | 1 + plugins/zenoh-plugin-rest/src/lib.rs | 2 +- .../src/replica/align_queryable.rs | 2 ++ .../zenoh-plugin-storage-manager/src/replica/aligner.rs | 2 +- .../zenoh-plugin-storage-manager/src/replica/storage.rs | 4 ++-- zenoh-ext/src/group.rs | 1 + zenoh-ext/src/querying_subscriber.rs | 3 +-- zenoh/src/api/query.rs | 9 ++++++--- zenoh/src/api/queryable.rs | 6 ++---- zenoh/src/api/session.rs | 5 +++-- zenoh/src/prelude.rs | 6 +++--- zenoh/tests/attachments.rs | 2 +- zenoh/tests/handler.rs | 2 +- zenoh/tests/session.rs | 2 +- zenoh/tests/unicity.rs | 1 + 19 files changed, 31 insertions(+), 21 deletions(-) diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 6326ddf6c6..67a393c61f 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -16,6 +16,7 @@ use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh::query::QueryTarget; +use zenoh::sample::ValueBuilderTrait; use zenoh::selector::Selector; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index ef9bc08617..c9cdd0635b 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -18,6 +18,7 @@ use zenoh::key_expr::keyexpr; use zenoh::payload::Payload; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; +use zenoh::sample::QoSBuilderTrait; use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index 7446456938..ea992ab5bc 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -16,6 +16,7 @@ use zenoh::config::Config; use zenoh::key_expr::keyexpr; use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; +use zenoh::sample::QoSBuilderTrait; use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 8450855956..d8b94c88b0 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -17,6 +17,7 @@ use std::convert::TryInto; use zenoh::payload::Payload; use zenoh::prelude::sync::*; use zenoh::publication::{CongestionControl, Priority}; +use zenoh::sample::QoSBuilderTrait; use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index 28627999f3..c7d22988e9 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -17,6 +17,7 @@ use zenoh::config::Config; use zenoh::key_expr::keyexpr; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; +use zenoh::sample::QoSBuilderTrait; use zenoh::session::SessionDeclarations; const HTML: &str = r#" diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 6465d74efd..ddd866efcb 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -36,7 +36,7 @@ use 
zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; -use zenoh::sample::{Sample, SampleKind}; +use zenoh::sample::{Sample, SampleKind, ValueBuilderTrait}; use zenoh::selector::{Parameters, Selector, TIME_RANGE_KEY}; use zenoh::session::{Session, SessionDeclarations}; use zenoh::value::Value; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index e33b0c519d..581e93cf6b 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -22,6 +22,8 @@ use zenoh::key_expr::OwnedKeyExpr; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::sample::Sample; +use zenoh::sample::TimestampBuilderTrait; +use zenoh::sample::ValueBuilderTrait; use zenoh::selector::Selector; use zenoh::session::Session; use zenoh::session::SessionDeclarations; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 466b415d73..61d5d074e4 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -21,7 +21,7 @@ use std::str; use zenoh::key_expr::{KeyExpr, OwnedKeyExpr}; use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; -use zenoh::sample::{Sample, SampleBuilder}; +use zenoh::sample::{Sample, SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}; use zenoh::session::Session; use zenoh::time::Timestamp; use zenoh::value::Value; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 88b21f0a0c..c568835831 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -30,8 +30,8 @@ use zenoh::key_expr::KeyExpr; use zenoh::key_expr::OwnedKeyExpr; use zenoh::prelude::r#async::*; use zenoh::query::{ConsolidationMode, QueryTarget}; -use zenoh::sample::SampleBuilder; -use zenoh::sample::{Sample, SampleKind}; +use zenoh::sample::{Sample, SampleKind, TimestampBuilderTrait}; +use zenoh::sample::{SampleBuilder, ValueBuilderTrait}; use zenoh::session::SessionDeclarations; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::value::Value; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 60c55fdc2f..12e349f299 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -33,6 +33,7 @@ use zenoh::prelude::r#async::*; use zenoh::publication::Priority; use zenoh::publication::Publisher; use zenoh::query::ConsolidationMode; +use zenoh::sample::QoSBuilderTrait; use zenoh::session::Session; use zenoh::session::SessionDeclarations; use zenoh::Error as ZError; diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 34b0ee9bb5..d5ed3e9987 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -19,9 +19,8 @@ use std::sync::{Arc, Mutex}; use std::time::Duration; use zenoh::handlers::{locked, DefaultHandler, IntoHandler}; use zenoh::key_expr::KeyExpr; -use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; -use zenoh::sample::{Locality, Sample, SampleBuilder}; +use zenoh::sample::{Locality, Sample, SampleBuilder, 
TimestampBuilderTrait}; use zenoh::selector::Selector; use zenoh::session::{SessionDeclarations, SessionRef}; use zenoh::subscriber::{Reliability, Subscriber}; diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 567bfe2c64..8b0f0c9f6c 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -14,13 +14,13 @@ //! Query primitives. use super::{ - builders::sample::{QoSBuilderTrait, SampleBuilderTrait, ValueBuilderTrait}, + builders::sample::{QoSBuilderTrait, ValueBuilderTrait}, encoding::Encoding, handlers::{locked, Callback, DefaultHandler, IntoHandler}, key_expr::KeyExpr, payload::Payload, publication::Priority, - sample::{Locality, QoSBuilder, Sample, SourceInfo}, + sample::{Locality, QoSBuilder, Sample}, selector::Selector, session::Session, value::Value, @@ -32,7 +32,10 @@ use zenoh_protocol::core::{CongestionControl, ZenohId}; use zenoh_result::ZResult; #[zenoh_macros::unstable] -use crate::api::sample::Attachment; +use super::{ + builders::sample::SampleBuilderTrait, + sample::{Attachment, SourceInfo}, +}; /// The [`Queryable`](crate::queryable::Queryable)s that should be target of a [`get`](Session::get). pub type QueryTarget = zenoh_protocol::network::request::ext::TargetType; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index a60adfe74f..c966e02101 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -14,10 +14,7 @@ //! Queryable primitives. use super::{ - builders::sample::{ - QoSBuilderTrait, SampleBuilder, SampleBuilderTrait, TimestampBuilderTrait, - ValueBuilderTrait, - }, + builders::sample::{QoSBuilderTrait, SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}, encoding::Encoding, handlers::{locked, DefaultHandler, IntoHandler}, key_expr::KeyExpr, @@ -43,6 +40,7 @@ use zenoh_result::ZResult; #[zenoh_macros::unstable] use { super::{ + builders::sample::SampleBuilderTrait, query::ReplyKeyExpr, sample::{Attachment, SourceInfo}, }, diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 880bf4405f..136dfda1bf 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -21,7 +21,7 @@ use super::{ info::SessionInfo, key_expr::{KeyExpr, KeyExprInner}, payload::Payload, - publication::{Priority, Publisher}, + publication::Priority, query::{ConsolidationMode, GetBuilder, QueryConsolidation, QueryState, QueryTarget, Reply}, queryable::{Query, QueryInner, QueryableBuilder, QueryableState}, sample::{DataInfo, DataInfoIntoSample, Locality, QoS, Sample, SampleKind}, @@ -74,10 +74,11 @@ use zenoh_protocol::{ use zenoh_result::ZResult; use zenoh_util::core::AsyncResolve; -#[zenoh_macros::unstable] +#[cfg(feature = "unstable")] use { super::{ liveliness::{Liveliness, LivelinessTokenState}, + publication::Publisher, publication::{MatchingListenerState, MatchingStatus}, sample::{Attachment, SourceInfo}, }, diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 76e41fe579..ef304b011b 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -57,9 +57,9 @@ pub(crate) mod common { // #[zenoh_macros::unstable] // pub use crate::api::publication::PublisherDeclarations; // pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; - pub use crate::api::builders::sample::{ - QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, - }; + // pub use crate::api::builders::sample::{ + // QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, + // }; #[zenoh_macros::unstable] pub use crate::api::builders::sample::SampleBuilderTrait; diff --git 
a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs
index 073d5537bd..e83e5700d8 100644
--- a/zenoh/tests/attachments.rs
+++ b/zenoh/tests/attachments.rs
@@ -64,7 +64,7 @@ fn pubsub() {
 fn queries() {
     use zenoh::{
         prelude::sync::*,
-        sample::{Attachment, SampleBuilderTrait},
+        sample::{Attachment, SampleBuilderTrait, ValueBuilderTrait},
         session::SessionDeclarations,
     };
     use zenoh_config::Config;
diff --git a/zenoh/tests/handler.rs b/zenoh/tests/handler.rs
index fdb8e225fa..a0c4129f3a 100644
--- a/zenoh/tests/handler.rs
+++ b/zenoh/tests/handler.rs
@@ -1,4 +1,4 @@
-use zenoh::session::SessionDeclarations;
+use zenoh::{sample::ValueBuilderTrait, session::SessionDeclarations};
 use zenoh_config::Config;
 //
diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs
index 313b30e141..0ecd529c33 100644
--- a/zenoh/tests/session.rs
+++ b/zenoh/tests/session.rs
@@ -17,7 +17,7 @@ use std::time::Duration;
 use zenoh::key_expr::KeyExpr;
 use zenoh::prelude::r#async::*;
 use zenoh::publication::CongestionControl;
-use zenoh::sample::SampleKind;
+use zenoh::sample::{QoSBuilderTrait, SampleKind};
 use zenoh::session::{Session, SessionDeclarations};
 use zenoh::subscriber::Reliability;
 use zenoh::value::Value;
diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs
index 1c76b1f8fc..84e243e801 100644
--- a/zenoh/tests/unicity.rs
+++ b/zenoh/tests/unicity.rs
@@ -20,6 +20,7 @@ use zenoh::config::EndPoint;
 use zenoh::key_expr::KeyExpr;
 use zenoh::prelude::r#async::*;
 use zenoh::publication::CongestionControl;
+use zenoh::sample::QoSBuilderTrait;
 use zenoh::session::{Session, SessionDeclarations};
 use zenoh_config as config;
 use zenoh_core::ztimeout;

From 59625a63dfccb6645d47e797563cf7e782fd61ca Mon Sep 17 00:00:00 2001
From: Michael Ilyin
Date: Sun, 7 Apr 2024 15:17:39 +0200
Subject: [PATCH 159/357] samplebuilder removed from prelude

---
 examples/examples/z_pub.rs |  1 +
 zenoh/src/prelude.rs       | 76 +++++++++++++++++++-------------------
 zenoh/tests/attachments.rs |  2 +-
 3 files changed, 40 insertions(+), 39 deletions(-)

diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs
index 10209b04e6..a09754914b 100644
--- a/examples/examples/z_pub.rs
+++ b/examples/examples/z_pub.rs
@@ -16,6 +16,7 @@
 use std::time::Duration;
 use zenoh::config::Config;
 use zenoh::key_expr::KeyExpr;
 use zenoh::prelude::r#async::*;
+use zenoh::sample::SampleBuilderTrait;
 use zenoh::session::SessionDeclarations;
 use zenoh_examples::CommonArgs;
diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs
index ef304b011b..d482ffae75 100644
--- a/zenoh/src/prelude.rs
+++ b/zenoh/src/prelude.rs
@@ -22,56 +22,56 @@
 //! use zenoh::prelude::r#async::*;
 //! ```
-pub use common::*;
-pub(crate) mod common {
-    // pub use crate::api::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr};
-    // pub use zenoh_buffers::{
-    // buffer::{Buffer, SplitBuffer},
-    // reader::HasReader,
-    // writer::HasWriter,
-    // };
-    // pub use zenoh_core::Resolve;
+// pub use common::*;
+// pub(crate) mod common {
+// pub use crate::api::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr};
+// pub use zenoh_buffers::{
+// buffer::{Buffer, SplitBuffer},
+// reader::HasReader,
+// writer::HasWriter,
+// };
+// pub use zenoh_core::Resolve;
-    // pub use zenoh_protocol::core::{EndPoint, Locator, ZenohId};
-    // #[zenoh_macros::unstable]
-    // pub use zenoh_protocol::core::{EntityGlobalId, EntityId};
+// pub use zenoh_protocol::core::{EndPoint, Locator, ZenohId};
+// #[zenoh_macros::unstable]
+// pub use zenoh_protocol::core::{EntityGlobalId, EntityId};
-    // pub use crate::config::{self, Config};
-    // pub use crate::handlers::IntoHandler;
-    // pub use crate::selector::{Parameter, Parameters, Selector};
-    // pub use crate::session::{Session, SessionDeclarations};
+// pub use crate::config::{self, Config};
+// pub use crate::handlers::IntoHandler;
+// pub use crate::selector::{Parameter, Parameters, Selector};
+// pub use crate::session::{Session, SessionDeclarations};
-    // pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget};
+// pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget};
-    // pub use crate::api::encoding::Encoding;
-    // pub use crate::api::value::Value;
-    /// The encoding of a zenoh `Value`.
-    // pub use crate::payload::{Deserialize, Payload, Serialize};
+// pub use crate::api::encoding::Encoding;
+// pub use crate::api::value::Value;
+/// The encoding of a zenoh `Value`.
+// pub use crate::payload::{Deserialize, Payload, Serialize};
-    // #[zenoh_macros::unstable]
-    // pub use crate::api::sample::Locality;
-    // #[zenoh_macros::unstable]
-    // pub use crate::api::sample::SourceInfo;
-    // pub use crate::api::sample::{Sample, SampleKind};
-    // pub use crate::api::publication::Priority;
-    // #[zenoh_macros::unstable]
-    // pub use crate::api::publication::PublisherDeclarations;
-    // pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI};
-    // pub use crate::api::builders::sample::{
-    // QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait,
-    // };
+// #[zenoh_macros::unstable]
+// pub use crate::api::sample::Locality;
+// #[zenoh_macros::unstable]
+// pub use crate::api::sample::SourceInfo;
+// pub use crate::api::sample::{Sample, SampleKind};
+// pub use crate::api::publication::Priority;
+// #[zenoh_macros::unstable]
+// pub use crate::api::publication::PublisherDeclarations;
+// pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI};
+// pub use crate::api::builders::sample::{
+// QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait,
+// };
-    #[zenoh_macros::unstable]
-    pub use crate::api::builders::sample::SampleBuilderTrait;
-}
+// #[zenoh_macros::unstable]
+// pub use crate::api::builders::sample::SampleBuilderTrait;
+// }
 /// Prelude to import when using Zenoh's sync API.
 pub mod sync {
-    pub use super::common::*;
+    // pub use super::common::*;
     pub use zenoh_core::SyncResolve;
 }
 /// Prelude to import when using Zenoh's async API.
pub mod r#async { - pub use super::common::*; + // pub use super::common::*; pub use zenoh_core::AsyncResolve; } diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index e83e5700d8..99177b17de 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -14,7 +14,7 @@ #[cfg(feature = "unstable")] #[test] fn pubsub() { - use zenoh::{prelude::sync::*, session::SessionDeclarations}; + use zenoh::{prelude::sync::*, sample::SampleBuilderTrait, session::SessionDeclarations}; use zenoh_config::Config; let zenoh = zenoh::open(Config::default()).res().unwrap(); From 687b9f07ce8e9711e8acaf4f12476e194a6e028e Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 16:50:11 +0200 Subject: [PATCH 160/357] prelude disabled --- examples/examples/z_delete.rs | 2 +- examples/examples/z_forward.rs | 2 +- examples/examples/z_get.rs | 2 +- examples/examples/z_info.rs | 2 +- examples/examples/z_ping.rs | 2 +- examples/examples/z_pong.rs | 2 +- examples/examples/z_pub.rs | 2 +- examples/examples/z_pub_thr.rs | 2 +- examples/examples/z_pull.rs | 4 ++-- examples/examples/z_put.rs | 2 +- examples/examples/z_put_float.rs | 2 +- examples/examples/z_queryable.rs | 2 +- examples/examples/z_scout.rs | 2 +- examples/examples/z_storage.rs | 2 +- examples/examples/z_sub.rs | 2 +- examples/examples/z_sub_thr.rs | 2 +- plugins/zenoh-plugin-example/src/lib.rs | 2 +- plugins/zenoh-plugin-rest/examples/z_serve_sse.rs | 2 +- plugins/zenoh-plugin-rest/src/lib.rs | 2 +- plugins/zenoh-plugin-storage-manager/src/lib.rs | 2 +- .../src/replica/align_queryable.rs | 2 +- .../zenoh-plugin-storage-manager/src/replica/aligner.rs | 2 +- plugins/zenoh-plugin-storage-manager/src/replica/mod.rs | 2 +- .../zenoh-plugin-storage-manager/src/replica/storage.rs | 2 +- plugins/zenoh-plugin-storage-manager/tests/operations.rs | 2 +- plugins/zenoh-plugin-storage-manager/tests/wildcard.rs | 2 +- zenoh-ext/examples/z_member.rs | 2 +- zenoh-ext/examples/z_pub_cache.rs | 2 +- zenoh-ext/examples/z_query_sub.rs | 2 +- zenoh-ext/examples/z_view_size.rs | 2 +- zenoh-ext/src/group.rs | 2 +- zenoh/src/api/publication.rs | 3 ++- zenoh/src/lib.rs | 7 ++++++- zenoh/src/net/runtime/adminspace.rs | 2 +- zenoh/tests/attachments.rs | 5 +++-- zenoh/tests/connection_retry.rs | 3 +-- zenoh/tests/events.rs | 2 +- zenoh/tests/handler.rs | 6 ++++-- zenoh/tests/interceptors.rs | 7 +------ zenoh/tests/liveliness.rs | 2 +- zenoh/tests/matching.rs | 2 +- zenoh/tests/qos.rs | 2 +- zenoh/tests/routing.rs | 2 +- zenoh/tests/session.rs | 2 +- zenoh/tests/unicity.rs | 2 +- 45 files changed, 57 insertions(+), 54 deletions(-) diff --git a/examples/examples/z_delete.rs b/examples/examples/z_delete.rs index 7f48f90c96..f441c1b68d 100644 --- a/examples/examples/z_delete.rs +++ b/examples/examples/z_delete.rs @@ -13,8 +13,8 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::core::AsyncResolve; use zenoh::key_expr::KeyExpr; -use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_forward.rs b/examples/examples/z_forward.rs index a4c3cb4ced..06d85b3931 100644 --- a/examples/examples/z_forward.rs +++ b/examples/examples/z_forward.rs @@ -13,8 +13,8 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::core::AsyncResolve; use zenoh::key_expr::KeyExpr; -use zenoh::prelude::r#async::*; use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; use zenoh_ext::SubscriberForward; diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 
67a393c61f..77304770a4 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -14,7 +14,7 @@ use clap::Parser; use std::time::Duration; use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use zenoh::core::AsyncResolve; use zenoh::query::QueryTarget; use zenoh::sample::ValueBuilderTrait; use zenoh::selector::Selector; diff --git a/examples/examples/z_info.rs b/examples/examples/z_info.rs index c63e5974e9..bb81030b3a 100644 --- a/examples/examples/z_info.rs +++ b/examples/examples/z_info.rs @@ -14,7 +14,7 @@ use clap::Parser; use zenoh::config::Config; use zenoh::config::ZenohId; -use zenoh::prelude::r#async::*; +use zenoh::core::AsyncResolve; use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index c9cdd0635b..08cd9e8817 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -14,9 +14,9 @@ use clap::Parser; use std::time::{Duration, Instant}; use zenoh::config::Config; +use zenoh::core::SyncResolve; use zenoh::key_expr::keyexpr; use zenoh::payload::Payload; -use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; use zenoh::sample::QoSBuilderTrait; use zenoh::session::SessionDeclarations; diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index ea992ab5bc..c3225809fa 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -13,8 +13,8 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::core::SyncResolve; use zenoh::key_expr::keyexpr; -use zenoh::prelude::sync::*; use zenoh::publication::CongestionControl; use zenoh::sample::QoSBuilderTrait; use zenoh::session::SessionDeclarations; diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index a09754914b..a0d8edadaf 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -14,8 +14,8 @@ use clap::Parser; use std::time::Duration; use zenoh::config::Config; +use zenoh::core::AsyncResolve; use zenoh::key_expr::KeyExpr; -use zenoh::prelude::r#async::*; use zenoh::sample::SampleBuilderTrait; use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index d8b94c88b0..fd50118022 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -14,8 +14,8 @@ use clap::Parser; use std::convert::TryInto; +use zenoh::core::SyncResolve; use zenoh::payload::Payload; -use zenoh::prelude::sync::*; use zenoh::publication::{CongestionControl, Priority}; use zenoh::sample::QoSBuilderTrait; use zenoh::session::SessionDeclarations; diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index 6a07de8358..b405fd331b 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -13,9 +13,9 @@ // use clap::Parser; use std::time::Duration; +use zenoh::core::AsyncResolve; use zenoh::{ - config::Config, handlers::RingBuffer, key_expr::KeyExpr, prelude::r#async::*, - session::SessionDeclarations, + config::Config, handlers::RingBuffer, key_expr::KeyExpr, session::SessionDeclarations, }; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_put.rs b/examples/examples/z_put.rs index b6039d09ba..a2c6ac2574 100644 --- a/examples/examples/z_put.rs +++ b/examples/examples/z_put.rs @@ -13,8 +13,8 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::core::AsyncResolve; use zenoh::key_expr::KeyExpr; -use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; 
#[tokio::main] diff --git a/examples/examples/z_put_float.rs b/examples/examples/z_put_float.rs index 86f1ce3c08..5fce2a5935 100644 --- a/examples/examples/z_put_float.rs +++ b/examples/examples/z_put_float.rs @@ -13,8 +13,8 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::core::AsyncResolve; use zenoh::key_expr::KeyExpr; -use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index ac58d9f094..49a5b946a7 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -13,8 +13,8 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::core::AsyncResolve; use zenoh::key_expr::KeyExpr; -use zenoh::prelude::r#async::*; use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_scout.rs b/examples/examples/z_scout.rs index a46b7c49fe..b0d34061d3 100644 --- a/examples/examples/z_scout.rs +++ b/examples/examples/z_scout.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use zenoh::core::AsyncResolve; use zenoh::scouting::scout; use zenoh::scouting::WhatAmI; diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index 07f54ce5ff..ed9a2b0b89 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -17,8 +17,8 @@ use clap::Parser; use futures::select; use std::collections::HashMap; use zenoh::config::Config; +use zenoh::core::AsyncResolve; use zenoh::key_expr::{keyexpr, KeyExpr}; -use zenoh::prelude::r#async::*; use zenoh::sample::{Sample, SampleKind}; use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index df77429356..ae1e7292e0 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -13,8 +13,8 @@ // use clap::Parser; use zenoh::config::Config; +use zenoh::core::AsyncResolve; use zenoh::key_expr::KeyExpr; -use zenoh::prelude::r#async::*; use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_sub_thr.rs b/examples/examples/z_sub_thr.rs index b4b6ecd0e5..d94ca4fa0f 100644 --- a/examples/examples/z_sub_thr.rs +++ b/examples/examples/z_sub_thr.rs @@ -14,7 +14,7 @@ use clap::Parser; use std::time::Instant; use zenoh::config::Config; -use zenoh::prelude::sync::*; +use zenoh::core::SyncResolve; use zenoh::session::SessionDeclarations; use zenoh_examples::CommonArgs; diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 6e9d0a917d..f5565f841c 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -24,11 +24,11 @@ use std::sync::{ }; use zenoh::key_expr::{keyexpr, KeyExpr}; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; -use zenoh::prelude::r#async::*; use zenoh::runtime::Runtime; use zenoh::sample::Sample; use zenoh::session::SessionDeclarations; use zenoh_core::zlock; +use zenoh_core::AsyncResolve; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; use zenoh_result::{bail, ZResult}; diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index c7d22988e9..40b03c3a59 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -14,8 +14,8 @@ use clap::{arg, Command}; use std::time::Duration; 
use zenoh::config::Config; +use zenoh::core::AsyncResolve; use zenoh::key_expr::keyexpr; -use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; use zenoh::sample::QoSBuilderTrait; use zenoh::session::SessionDeclarations; diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index ddd866efcb..44b1013936 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -29,11 +29,11 @@ use std::sync::Arc; use tide::http::Mime; use tide::sse::Sender; use tide::{Request, Response, Server, StatusCode}; +use zenoh::core::AsyncResolve; use zenoh::encoding::Encoding; use zenoh::key_expr::{keyexpr, KeyExpr}; use zenoh::payload::{Payload, StringOrBase64}; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; -use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; use zenoh::sample::{Sample, SampleKind, ValueBuilderTrait}; diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 12fbede21d..bea144e39a 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -27,9 +27,9 @@ use std::convert::TryFrom; use std::sync::Arc; use std::sync::Mutex; use storages_mgt::StorageMessage; +use zenoh::core::SyncResolve; use zenoh::key_expr::keyexpr; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; -use zenoh::prelude::sync::*; use zenoh::runtime::Runtime; use zenoh::selector::Selector; use zenoh::session::Session; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 581e93cf6b..d7620aad13 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -18,9 +18,9 @@ use std::cmp::Ordering; use std::collections::{BTreeSet, HashMap, HashSet}; use std::str; use std::str::FromStr; +use zenoh::core::AsyncResolve; use zenoh::key_expr::OwnedKeyExpr; use zenoh::payload::StringOrBase64; -use zenoh::prelude::r#async::*; use zenoh::sample::Sample; use zenoh::sample::TimestampBuilderTrait; use zenoh::sample::ValueBuilderTrait; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 61d5d074e4..7e604c1f01 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -18,9 +18,9 @@ use async_std::sync::{Arc, RwLock}; use flume::{Receiver, Sender}; use std::collections::{HashMap, HashSet}; use std::str; +use zenoh::core::AsyncResolve; use zenoh::key_expr::{KeyExpr, OwnedKeyExpr}; use zenoh::payload::StringOrBase64; -use zenoh::prelude::r#async::*; use zenoh::sample::{Sample, SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}; use zenoh::session::Session; use zenoh::time::Timestamp; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index 7192e3ab7e..3aec2ec476 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -26,9 +26,9 @@ use std::str; use std::str::FromStr; use std::time::{Duration, SystemTime}; use urlencoding::encode; +use zenoh::core::AsyncResolve; use zenoh::key_expr::OwnedKeyExpr; use zenoh::payload::StringOrBase64; -use 
zenoh::prelude::r#async::*; use zenoh::sample::Locality; use zenoh::session::Session; use zenoh::session::SessionDeclarations; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index c568835831..f0733c2371 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -23,12 +23,12 @@ use std::str::{self, FromStr}; use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::SplitBuffer; use zenoh::buffers::ZBuf; +use zenoh::core::AsyncResolve; use zenoh::key_expr::keyexpr_tree::KeyedSetProvider; use zenoh::key_expr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut}; use zenoh::key_expr::keyexpr_tree::{KeBoxTree, NonWild, UnknownWildness}; use zenoh::key_expr::KeyExpr; use zenoh::key_expr::OwnedKeyExpr; -use zenoh::prelude::r#async::*; use zenoh::query::{ConsolidationMode, QueryTarget}; use zenoh::sample::{Sample, SampleKind, TimestampBuilderTrait}; use zenoh::sample::{SampleBuilder, ValueBuilderTrait}; diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index bd612bec87..1def746449 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -21,8 +21,8 @@ use std::thread::sleep; use async_std::task; use zenoh::config::{Config, ValidatedMap}; +use zenoh::core::AsyncResolve; use zenoh::payload::StringOrBase64; -use zenoh::prelude::r#async::*; use zenoh::query::Reply; use zenoh::sample::Sample; use zenoh::session::Session; diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index 054be54ce8..bf1ecf707f 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -22,8 +22,8 @@ use std::thread::sleep; // use std::collections::HashMap; use async_std::task; use zenoh::config::{Config, ValidatedMap}; +use zenoh::core::AsyncResolve; use zenoh::payload::StringOrBase64; -use zenoh::prelude::r#async::*; use zenoh::query::Reply; use zenoh::sample::Sample; use zenoh::session::Session; diff --git a/zenoh-ext/examples/z_member.rs b/zenoh-ext/examples/z_member.rs index fb10ac4cd8..217c0d90e3 100644 --- a/zenoh-ext/examples/z_member.rs +++ b/zenoh-ext/examples/z_member.rs @@ -15,7 +15,7 @@ use futures::StreamExt; use std::sync::Arc; use std::time::Duration; use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use zenoh::core::AsyncResolve; use zenoh_ext::group::*; #[tokio::main] diff --git a/zenoh-ext/examples/z_pub_cache.rs b/zenoh-ext/examples/z_pub_cache.rs index e564ffb8f1..50b6d11c53 100644 --- a/zenoh-ext/examples/z_pub_cache.rs +++ b/zenoh-ext/examples/z_pub_cache.rs @@ -14,7 +14,7 @@ use clap::{arg, Command}; use std::time::Duration; use zenoh::config::{Config, ModeDependentValue}; -use zenoh::prelude::r#async::*; +use zenoh::core::AsyncResolve; use zenoh_ext::*; #[tokio::main] diff --git a/zenoh-ext/examples/z_query_sub.rs b/zenoh-ext/examples/z_query_sub.rs index 61ea0eac92..c3fc363069 100644 --- a/zenoh-ext/examples/z_query_sub.rs +++ b/zenoh-ext/examples/z_query_sub.rs @@ -14,7 +14,7 @@ use clap::arg; use clap::Command; use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use zenoh::core::AsyncResolve; use zenoh::query::ReplyKeyExpr; use zenoh::session::SessionDeclarations; use zenoh_ext::*; diff --git 
a/zenoh-ext/examples/z_view_size.rs b/zenoh-ext/examples/z_view_size.rs index 64e7b3ea4c..8496629646 100644 --- a/zenoh-ext/examples/z_view_size.rs +++ b/zenoh-ext/examples/z_view_size.rs @@ -15,7 +15,7 @@ use clap::{arg, Command}; use std::sync::Arc; use std::time::Duration; use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use zenoh::core::AsyncResolve; use zenoh_ext::group::*; #[tokio::main] diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 12e349f299..d6175b502d 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -29,7 +29,6 @@ use zenoh::key_expr::keyexpr; use zenoh::key_expr::KeyExpr; use zenoh::key_expr::OwnedKeyExpr; use zenoh::payload::PayloadReader; -use zenoh::prelude::r#async::*; use zenoh::publication::Priority; use zenoh::publication::Publisher; use zenoh::query::ConsolidationMode; @@ -38,6 +37,7 @@ use zenoh::session::Session; use zenoh::session::SessionDeclarations; use zenoh::Error as ZError; use zenoh::Result as ZResult; +use zenoh_core::AsyncResolve; use zenoh_result::bail; use zenoh_sync::Condition; diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 2c3d21eec4..ad8be76e65 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -1148,7 +1148,8 @@ mod tests { #[test] fn sample_kind_integrity_in_put_builder() { - use crate::{api::session::open, prelude::sync::*}; + use crate::api::session::open; + use zenoh_core::SyncResolve; const KEY_EXPR: &str = "test/sample_kind_integrity/put_builder"; const VALUE: &str = "zenoh"; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index c6e890cb27..9cbe4ca5e5 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -116,6 +116,11 @@ pub const FEATURES: &str = concat_enabled_features!( pub use crate::api::session::open; +pub mod core { + pub use zenoh_core::AsyncResolve; + pub use zenoh_core::SyncResolve; +} + /// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate /// reading and writing data. 
pub mod buffers { @@ -277,4 +282,4 @@ pub mod shm { pub use zenoh_shm::SharedMemoryManager; } -pub mod prelude; +// pub mod prelude; diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index be992621e6..968f6cc3de 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -21,7 +21,6 @@ use crate::api::value::Value; use crate::encoding::Encoding; use crate::net::primitives::Primitives; use crate::payload::Payload; -use crate::prelude::sync::SyncResolve; use log::{error, trace}; use serde_json::json; use std::collections::HashMap; @@ -31,6 +30,7 @@ use std::sync::Arc; use std::sync::Mutex; use zenoh_buffers::buffer::SplitBuffer; use zenoh_config::{ConfigValidator, ValidatedMap, WhatAmI}; +use zenoh_core::SyncResolve; use zenoh_plugin_trait::{PluginControl, PluginStatus}; use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 99177b17de..41c8d85dd7 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -14,8 +14,9 @@ #[cfg(feature = "unstable")] #[test] fn pubsub() { - use zenoh::{prelude::sync::*, sample::SampleBuilderTrait, session::SessionDeclarations}; + use zenoh::{sample::SampleBuilderTrait, session::SessionDeclarations}; use zenoh_config::Config; + use zenoh_core::SyncResolve; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh @@ -63,11 +64,11 @@ fn pubsub() { #[test] fn queries() { use zenoh::{ - prelude::sync::*, sample::{Attachment, SampleBuilderTrait, ValueBuilderTrait}, session::SessionDeclarations, }; use zenoh_config::Config; + use zenoh_core::SyncResolve; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh diff --git a/zenoh/tests/connection_retry.rs b/zenoh/tests/connection_retry.rs index 675b4eb879..f510e4f54a 100644 --- a/zenoh/tests/connection_retry.rs +++ b/zenoh/tests/connection_retry.rs @@ -1,6 +1,5 @@ use zenoh_config::{Config, ConnectionRetryConf, EndPoint, ValidatedMap}; - -use zenoh::prelude::sync::*; +use zenoh_core::SyncResolve; #[test] fn retry_config_overriding() { diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index 8b5c79bb97..3069e53e24 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -12,12 +12,12 @@ // ZettaScale Zenoh Team, // use std::time::Duration; -use zenoh::prelude::r#async::*; use zenoh::query::Reply; use zenoh::sample::SampleKind; use zenoh::session::{Session, SessionDeclarations}; use zenoh_config::peer; use zenoh_core::ztimeout; +use zenoh_core::AsyncResolve; const TIMEOUT: Duration = Duration::from_secs(10); diff --git a/zenoh/tests/handler.rs b/zenoh/tests/handler.rs index a0c4129f3a..82030daef5 100644 --- a/zenoh/tests/handler.rs +++ b/zenoh/tests/handler.rs @@ -17,7 +17,8 @@ use zenoh_config::Config; #[test] fn pubsub_with_ringbuffer() { use std::{thread, time::Duration}; - use zenoh::{handlers::RingBuffer, prelude::sync::*}; + use zenoh::handlers::RingBuffer; + use zenoh_core::SyncResolve; let zenoh = zenoh::open(Config::default()).res().unwrap(); let sub = zenoh @@ -49,7 +50,8 @@ fn pubsub_with_ringbuffer() { #[test] fn query_with_ringbuffer() { - use zenoh::{handlers::RingBuffer, prelude::sync::*}; + use zenoh::handlers::RingBuffer; + use zenoh_core::SyncResolve; let zenoh = zenoh::open(Config::default()).res().unwrap(); let queryable = zenoh diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index 1ff1f49651..c20dcafdb7 100644 --- a/zenoh/tests/interceptors.rs 
+++ b/zenoh/tests/interceptors.rs @@ -15,6 +15,7 @@ use std::sync::{Arc, Mutex}; use zenoh::session::SessionDeclarations; use zenoh_config::{Config, ValidatedMap}; use zenoh_core::zlock; +use zenoh_core::SyncResolve; struct IntervalCounter { first_tick: bool, @@ -63,8 +64,6 @@ impl IntervalCounter { fn downsampling_by_keyexpr_impl(egress: bool) { let _ = env_logger::builder().is_test(true).try_init(); - use zenoh::prelude::sync::*; - let ds_cfg = format!( r#" [ @@ -180,8 +179,6 @@ fn downsampling_by_keyexpr() { fn downsampling_by_interface_impl(egress: bool) { let _ = env_logger::builder().is_test(true).try_init(); - use zenoh::prelude::sync::*; - let ds_cfg = format!( r#" [ @@ -282,8 +279,6 @@ fn downsampling_by_interface() { fn downsampling_config_error_wrong_strategy() { let _ = env_logger::builder().is_test(true).try_init(); - use zenoh::prelude::sync::*; - let mut config = Config::default(); config .insert_json5( diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index b672227ab9..43dfd37281 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -12,9 +12,9 @@ // ZettaScale Zenoh Team, // use std::time::Duration; -use zenoh::prelude::r#async::*; use zenoh_config as config; use zenoh_core::ztimeout; +use zenoh_core::AsyncResolve; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff --git a/zenoh/tests/matching.rs b/zenoh/tests/matching.rs index b22d8dd1c8..341f66bba7 100644 --- a/zenoh/tests/matching.rs +++ b/zenoh/tests/matching.rs @@ -14,11 +14,11 @@ use std::str::FromStr; use std::time::Duration; use zenoh::config::Locator; -use zenoh::prelude::r#async::*; use zenoh::session::Session; use zenoh_config as config; use zenoh_config::peer; use zenoh_core::ztimeout; +use zenoh_core::AsyncResolve; use zenoh_result::ZResult as Result; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index e11fcf4e22..5e3f507006 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -12,11 +12,11 @@ // ZettaScale Zenoh Team, // use std::time::Duration; -use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; use zenoh::sample::QoSBuilderTrait; use zenoh::{publication::Priority, session::SessionDeclarations}; use zenoh_core::ztimeout; +use zenoh_core::AsyncResolve; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 98c2f002f7..c0879bdb7e 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -17,12 +17,12 @@ use std::sync::{atomic::AtomicUsize, Arc}; use std::time::Duration; use tokio_util::{sync::CancellationToken, task::TaskTracker}; use zenoh::config::{Config, ModeDependentValue}; -use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; use zenoh::sample::QoSBuilderTrait; use zenoh::session::{Session, SessionDeclarations}; use zenoh::Result; use zenoh_core::ztimeout; +use zenoh_core::AsyncResolve; use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher}; use zenoh_result::bail; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 0ecd529c33..b325e7601b 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -15,7 +15,6 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; use zenoh::key_expr::KeyExpr; -use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; use zenoh::sample::{QoSBuilderTrait, SampleKind}; use 
zenoh::session::{Session, SessionDeclarations}; @@ -23,6 +22,7 @@ use zenoh::subscriber::Reliability; use zenoh::value::Value; use zenoh_config as config; use zenoh_core::ztimeout; +use zenoh_core::AsyncResolve; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index 84e243e801..8ad80b7315 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -18,12 +18,12 @@ use std::time::Duration; use tokio::runtime::Handle; use zenoh::config::EndPoint; use zenoh::key_expr::KeyExpr; -use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; use zenoh::sample::QoSBuilderTrait; use zenoh::session::{Session, SessionDeclarations}; use zenoh_config as config; use zenoh_core::ztimeout; +use zenoh_core::AsyncResolve; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); From c24ea14833f24eb1a0c687dd77886f085728eab4 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 17:38:00 +0200 Subject: [PATCH 161/357] zenoh-ext depends on zenoh only --- Cargo.lock | 6 ------ zenoh-ext/Cargo.toml | 14 +++++++------- zenoh-ext/src/group.rs | 6 +++--- zenoh-ext/src/lib.rs | 2 +- zenoh-ext/src/publication_cache.rs | 14 ++++++++------ zenoh-ext/src/querying_subscriber.rs | 11 ++++++----- zenoh-ext/src/session_ext.rs | 9 +++++---- zenoh/src/lib.rs | 14 ++++++++++++++ 8 files changed, 44 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 75a045d9b3..f2f2b0f5e7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4623,12 +4623,6 @@ dependencies = [ "serde_json", "tokio", "zenoh", - "zenoh-core", - "zenoh-macros", - "zenoh-result", - "zenoh-runtime", - "zenoh-sync", - "zenoh-util", ] [[package]] diff --git a/zenoh-ext/Cargo.toml b/zenoh-ext/Cargo.toml index 65f1d47af1..d86745b4fe 100644 --- a/zenoh-ext/Cargo.toml +++ b/zenoh-ext/Cargo.toml @@ -31,7 +31,13 @@ unstable = [] default = [] [dependencies] -tokio = { workspace = true, features = ["rt", "sync", "time", "macros", "io-std"] } +tokio = { workspace = true, features = [ + "rt", + "sync", + "time", + "macros", + "io-std", +] } bincode = { workspace = true } env_logger = { workspace = true } flume = { workspace = true } @@ -42,12 +48,6 @@ serde = { workspace = true, features = ["default"] } serde_cbor = { workspace = true } serde_json = { workspace = true } zenoh = { workspace = true, features = ["unstable"], default-features = false } -zenoh-core = { workspace = true } -zenoh-macros = { workspace = true } -zenoh-result = { workspace = true } -zenoh-sync = { workspace = true } -zenoh-util = { workspace = true } -zenoh-runtime = { workspace = true } [dev-dependencies] clap = { workspace = true, features = ["derive"] } diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index d6175b502d..90fefae638 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -25,6 +25,9 @@ use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::sync::Mutex; use tokio::task::JoinHandle; +use zenoh::core::AsyncResolve; +use zenoh::internal::bail; +use zenoh::internal::Condition; use zenoh::key_expr::keyexpr; use zenoh::key_expr::KeyExpr; use zenoh::key_expr::OwnedKeyExpr; @@ -37,9 +40,6 @@ use zenoh::session::Session; use zenoh::session::SessionDeclarations; use zenoh::Error as ZError; use zenoh::Result as ZResult; -use zenoh_core::AsyncResolve; -use zenoh_result::bail; -use zenoh_sync::Condition; const GROUP_PREFIX: &str = "zenoh/ext/net/group"; const 
EVENT_POSTFIX: &str = "evt"; diff --git a/zenoh-ext/src/lib.rs b/zenoh-ext/src/lib.rs index 7ac880fd8c..a59e057371 100644 --- a/zenoh-ext/src/lib.rs +++ b/zenoh-ext/src/lib.rs @@ -23,9 +23,9 @@ pub use querying_subscriber::{ pub use session_ext::SessionExt; pub use subscriber_ext::SubscriberBuilderExt; pub use subscriber_ext::SubscriberForward; +use zenoh::internal::zerror; use zenoh::query::Reply; use zenoh::{sample::Sample, Result as ZResult}; -use zenoh_core::zerror; /// The space of keys to use in a [`FetchingSubscriber`]. pub enum KeySpace { diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 25311ca647..ac37eaeafa 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -15,15 +15,17 @@ use flume::{bounded, Sender}; use std::collections::{HashMap, VecDeque}; use std::convert::TryInto; use std::future::Ready; +use zenoh::core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; +use zenoh::internal::ResolveFuture; use zenoh::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; use zenoh::queryable::{Query, Queryable}; +use zenoh::runtime::ZRuntime; use zenoh::sample::{Locality, Sample}; use zenoh::selector::Parameters; use zenoh::session::{SessionDeclarations, SessionRef}; use zenoh::subscriber::FlumeSubscriber; -use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; -use zenoh_result::{bail, ZResult}; -use zenoh_util::core::ResolveFuture; +use zenoh::Error; +use zenoh::{internal::bail, Result as ZResult}; /// The builder of PublicationCache, allowing to configure it. #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] @@ -57,7 +59,7 @@ impl<'a, 'b, 'c> PublicationCacheBuilder<'a, 'b, 'c> { pub fn queryable_prefix(mut self, queryable_prefix: TryIntoKeyExpr) -> Self where TryIntoKeyExpr: TryInto>, - >>::Error: Into, + >>::Error: Into, { self.queryable_prefix = Some(queryable_prefix.try_into().map_err(Into::into)); self @@ -65,7 +67,7 @@ impl<'a, 'b, 'c> PublicationCacheBuilder<'a, 'b, 'c> { /// Restrict the matching queries that will be receive by this [`PublicationCache`]'s queryable /// to the ones that have the given [`Locality`](zenoh::prelude::Locality). 
- #[zenoh_macros::unstable] + #[zenoh::internal::unstable] #[inline] pub fn queryable_allowed_origin(mut self, origin: Locality) -> Self { self.queryable_origin = Some(origin); @@ -169,7 +171,7 @@ impl<'a> PublicationCache<'a> { // TODO(yuyuan): use CancellationToken to manage it let (stoptx, stoprx) = bounded::(1); - zenoh_runtime::ZRuntime::TX.spawn(async move { + ZRuntime::TX.spawn(async move { let mut cache: HashMap> = HashMap::with_capacity(resources_limit.unwrap_or(32)); let limit = resources_limit.unwrap_or(usize::MAX); diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index d5ed3e9987..9502a3a7b2 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -17,7 +17,9 @@ use std::future::Ready; use std::mem::swap; use std::sync::{Arc, Mutex}; use std::time::Duration; +use zenoh::core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; use zenoh::handlers::{locked, DefaultHandler, IntoHandler}; +use zenoh::internal::zlock; use zenoh::key_expr::KeyExpr; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; use zenoh::sample::{Locality, Sample, SampleBuilder, TimestampBuilderTrait}; @@ -25,8 +27,7 @@ use zenoh::selector::Selector; use zenoh::session::{SessionDeclarations, SessionRef}; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::{new_reception_timestamp, Timestamp}; -use zenoh::Result as ZResult; -use zenoh_core::{zlock, AsyncResolve, Resolvable, Resolve, SyncResolve}; +use zenoh::{Error, Result as ZResult}; use crate::ExtractSample; @@ -162,7 +163,7 @@ impl<'a, 'b, Handler> QueryingSubscriberBuilder<'a, 'b, crate::UserSpace, Handle /// Restrict the matching publications that will be receive by this [`Subscriber`] /// to the ones that have the given [`Locality`](zenoh::prelude::Locality). - #[zenoh_macros::unstable] + #[zenoh::internal::unstable] #[inline] pub fn allowed_origin(mut self, origin: Locality) -> Self { self.origin = origin; @@ -174,7 +175,7 @@ impl<'a, 'b, Handler> QueryingSubscriberBuilder<'a, 'b, crate::UserSpace, Handle pub fn query_selector(mut self, query_selector: IntoSelector) -> Self where IntoSelector: TryInto>, - >>::Error: Into, + >>::Error: Into, { self.query_selector = Some(query_selector.try_into().map_err(Into::into)); self @@ -522,7 +523,7 @@ where /// Restrict the matching publications that will be receive by this [`FetchingSubscriber`] /// to the ones that have the given [`Locality`](zenoh::prelude::Locality). 
- #[zenoh_macros::unstable] + #[zenoh::internal::unstable] #[inline] pub fn allowed_origin(mut self, origin: Locality) -> Self { self.origin = origin; diff --git a/zenoh-ext/src/session_ext.rs b/zenoh-ext/src/session_ext.rs index 2dd0fbd873..ab178ae70f 100644 --- a/zenoh-ext/src/session_ext.rs +++ b/zenoh-ext/src/session_ext.rs @@ -17,6 +17,7 @@ use std::sync::Arc; use zenoh::{ key_expr::KeyExpr, session::{Session, SessionRef}, + Error, }; /// Some extensions to the [`zenoh::Session`](zenoh::Session) @@ -27,7 +28,7 @@ pub trait SessionExt<'s, 'a> { ) -> PublicationCacheBuilder<'a, 'b, 'c> where TryIntoKeyExpr: TryInto>, - >>::Error: Into; + >>::Error: Into; } impl<'s, 'a> SessionExt<'s, 'a> for SessionRef<'a> { @@ -37,7 +38,7 @@ impl<'s, 'a> SessionExt<'s, 'a> for SessionRef<'a> { ) -> PublicationCacheBuilder<'a, 'b, 'c> where TryIntoKeyExpr: TryInto>, - >>::Error: Into, + >>::Error: Into, { PublicationCacheBuilder::new(self.clone(), pub_key_expr.try_into().map_err(Into::into)) } @@ -50,7 +51,7 @@ impl<'a> SessionExt<'a, 'a> for Session { ) -> PublicationCacheBuilder<'a, 'b, 'c> where TryIntoKeyExpr: TryInto>, - >>::Error: Into, + >>::Error: Into, { SessionRef::Borrow(self).declare_publication_cache(pub_key_expr) } @@ -80,7 +81,7 @@ impl<'s> SessionExt<'s, 'static> for Arc { ) -> PublicationCacheBuilder<'static, 'b, 'c> where TryIntoKeyExpr: TryInto>, - >>::Error: Into, + >>::Error: Into, { SessionRef::Shared(self.clone()).declare_publication_cache(pub_key_expr) } diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 9cbe4ca5e5..2e3b34bdf2 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -118,9 +118,21 @@ pub use crate::api::session::open; pub mod core { pub use zenoh_core::AsyncResolve; + pub use zenoh_core::Resolvable; + pub use zenoh_core::Resolve; pub use zenoh_core::SyncResolve; } +#[doc(hidden)] +pub mod internal { + pub use zenoh_core::zerror; + pub use zenoh_core::zlock; + pub use zenoh_macros::unstable; + pub use zenoh_result::bail; + pub use zenoh_sync::Condition; + pub use zenoh_util::core::ResolveFuture; +} + /// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate /// reading and writing data. 
pub mod buffers { @@ -259,8 +271,10 @@ pub mod time { pub use zenoh_protocol::core::{Timestamp, TimestampId, NTP64}; } +#[doc(hidden)] pub mod runtime { pub use crate::net::runtime::{AdminSpace, Runtime}; + pub use zenoh_runtime::ZRuntime; } pub mod config { From 4d05c9f824e1fccb5cf63c1abe7916aba5f8c00a Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 18:43:59 +0200 Subject: [PATCH 162/357] dependencis removed --- Cargo.lock | 3 --- plugins/zenoh-plugin-storage-manager/Cargo.toml | 3 --- plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs | 2 +- plugins/zenoh-plugin-storage-manager/src/lib.rs | 4 ++-- .../zenoh-plugin-storage-manager/src/memory_backend/mod.rs | 2 +- plugins/zenoh-plugin-storage-manager/src/replica/storage.rs | 2 +- plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs | 2 +- 7 files changed, 6 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f2f2b0f5e7..93eef060a7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4955,10 +4955,7 @@ dependencies = [ "serde_json", "urlencoding", "zenoh", - "zenoh-collections", - "zenoh-core", "zenoh-plugin-trait", - "zenoh-result", "zenoh-util", "zenoh_backend_traits", ] diff --git a/plugins/zenoh-plugin-storage-manager/Cargo.toml b/plugins/zenoh-plugin-storage-manager/Cargo.toml index fe9359f696..2b2a6a3f2c 100644 --- a/plugins/zenoh-plugin-storage-manager/Cargo.toml +++ b/plugins/zenoh-plugin-storage-manager/Cargo.toml @@ -47,10 +47,7 @@ serde = { workspace = true, features = ["default"] } serde_json = { workspace = true } urlencoding = { workspace = true } zenoh = { workspace = true, features = ["unstable"] } -zenoh-collections = { workspace = true } -zenoh-core = { workspace = true } zenoh-plugin-trait = { workspace = true } -zenoh-result = { workspace = true } zenoh-util = { workspace = true } zenoh_backend_traits = { workspace = true } diff --git a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs index dcce49f5da..3837d26dda 100644 --- a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs @@ -15,9 +15,9 @@ use super::storages_mgt::*; use flume::Sender; use std::sync::Arc; use zenoh::session::Session; +use zenoh::Result as ZResult; use zenoh_backend_traits::config::StorageConfig; use zenoh_backend_traits::{Capability, VolumeInstance}; -use zenoh_result::ZResult; pub struct StoreIntercept { pub storage: Box, diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index bea144e39a..e3d11be4c8 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -28,24 +28,24 @@ use std::sync::Arc; use std::sync::Mutex; use storages_mgt::StorageMessage; use zenoh::core::SyncResolve; +use zenoh::internal::zlock; use zenoh::key_expr::keyexpr; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::runtime::Runtime; use zenoh::selector::Selector; use zenoh::session::Session; +use zenoh::Result as ZResult; use zenoh_backend_traits::config::ConfigDiff; use zenoh_backend_traits::config::PluginConfig; use zenoh_backend_traits::config::StorageConfig; use zenoh_backend_traits::config::VolumeConfig; use zenoh_backend_traits::VolumeInstance; -use zenoh_core::zlock; use zenoh_plugin_trait::plugin_long_version; use zenoh_plugin_trait::plugin_version; use zenoh_plugin_trait::Plugin; use zenoh_plugin_trait::PluginControl; use zenoh_plugin_trait::PluginReport; use 
zenoh_plugin_trait::PluginStatusRec; -use zenoh_result::ZResult; use zenoh_util::LibLoader; mod backends_mgt; diff --git a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs index 0924279cb2..cd491ba01c 100644 --- a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs @@ -18,10 +18,10 @@ use std::sync::Arc; use zenoh::key_expr::OwnedKeyExpr; use zenoh::time::Timestamp; use zenoh::value::Value; +use zenoh::Result as ZResult; use zenoh_backend_traits::config::{StorageConfig, VolumeConfig}; use zenoh_backend_traits::*; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin}; -use zenoh_result::ZResult; use crate::MEMORY_BACKEND_NAME; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index f0733c2371..c4d298ba83 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -24,6 +24,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::SplitBuffer; use zenoh::buffers::ZBuf; use zenoh::core::AsyncResolve; +use zenoh::internal::bail; use zenoh::key_expr::keyexpr_tree::KeyedSetProvider; use zenoh::key_expr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut}; use zenoh::key_expr::keyexpr_tree::{KeBoxTree, NonWild, UnknownWildness}; @@ -38,7 +39,6 @@ use zenoh::value::Value; use zenoh::{session::Session, Result as ZResult}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; -use zenoh_result::bail; use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer}; pub const WILDCARD_UPDATES_FILENAME: &str = "wildcard_updates"; diff --git a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs index 8643429a65..8ee9eb7218 100644 --- a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs @@ -13,8 +13,8 @@ // use async_std::sync::Arc; use zenoh::session::Session; +use zenoh::Result as ZResult; use zenoh_backend_traits::config::StorageConfig; -use zenoh_result::ZResult; pub use super::replica::{Replica, StorageService}; From 9ab5fc94be36c3ff7d3cae649332005646bb3616 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 18:47:37 +0200 Subject: [PATCH 163/357] dependency removed --- Cargo.lock | 1 - plugins/zenoh-plugin-storage-manager/Cargo.toml | 1 - plugins/zenoh-plugin-storage-manager/src/lib.rs | 2 +- plugins/zenoh-plugin-storage-manager/src/replica/storage.rs | 2 +- zenoh/src/lib.rs | 2 ++ 5 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 93eef060a7..ba7cb0efbd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4956,7 +4956,6 @@ dependencies = [ "urlencoding", "zenoh", "zenoh-plugin-trait", - "zenoh-util", "zenoh_backend_traits", ] diff --git a/plugins/zenoh-plugin-storage-manager/Cargo.toml b/plugins/zenoh-plugin-storage-manager/Cargo.toml index 2b2a6a3f2c..ca6e5cb0fa 100644 --- a/plugins/zenoh-plugin-storage-manager/Cargo.toml +++ b/plugins/zenoh-plugin-storage-manager/Cargo.toml @@ -48,7 +48,6 @@ serde_json = { workspace = true } urlencoding = { workspace = true } zenoh = { workspace = true, features = ["unstable"] } zenoh-plugin-trait = { workspace = true } -zenoh-util = { 
workspace = true } zenoh_backend_traits = { workspace = true } [build-dependencies] diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index e3d11be4c8..e920486de8 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -30,6 +30,7 @@ use storages_mgt::StorageMessage; use zenoh::core::SyncResolve; use zenoh::internal::zlock; use zenoh::key_expr::keyexpr; +use zenoh::plugins::LibLoader; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::runtime::Runtime; use zenoh::selector::Selector; @@ -46,7 +47,6 @@ use zenoh_plugin_trait::Plugin; use zenoh_plugin_trait::PluginControl; use zenoh_plugin_trait::PluginReport; use zenoh_plugin_trait::PluginStatusRec; -use zenoh_util::LibLoader; mod backends_mgt; use backends_mgt::*; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index c4d298ba83..f5524c8eb5 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -25,6 +25,7 @@ use zenoh::buffers::SplitBuffer; use zenoh::buffers::ZBuf; use zenoh::core::AsyncResolve; use zenoh::internal::bail; +use zenoh::internal::{zenoh_home, Timed, TimedEvent, Timer}; use zenoh::key_expr::keyexpr_tree::KeyedSetProvider; use zenoh::key_expr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut}; use zenoh::key_expr::keyexpr_tree::{KeBoxTree, NonWild, UnknownWildness}; @@ -39,7 +40,6 @@ use zenoh::value::Value; use zenoh::{session::Session, Result as ZResult}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; -use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer}; pub const WILDCARD_UPDATES_FILENAME: &str = "wildcard_updates"; pub const TOMBSTONE_FILENAME: &str = "tombstones"; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 2e3b34bdf2..1c03506039 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -131,6 +131,7 @@ pub mod internal { pub use zenoh_result::bail; pub use zenoh_sync::Condition; pub use zenoh_util::core::ResolveFuture; + pub use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer, ZENOH_HOME_ENV_VAR}; } /// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate @@ -289,6 +290,7 @@ pub mod plugins { pub use crate::api::plugins::Response; pub use crate::api::plugins::RunningPlugin; pub use crate::api::plugins::{RunningPluginTrait, ZenohPlugin}; + pub use zenoh_util::LibLoader; } #[cfg(feature = "shared-memory")] From 964176f9c81ee22947b97523729f56caea40d4e1 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 19:03:16 +0200 Subject: [PATCH 164/357] small move --- .../zenoh-plugin-storage-manager/src/lib.rs | 2 +- zenoh/src/lib.rs | 23 ++++++++++--------- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index e920486de8..701a34f4d6 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -29,8 +29,8 @@ use std::sync::Mutex; use storages_mgt::StorageMessage; use zenoh::core::SyncResolve; use zenoh::internal::zlock; +use zenoh::internal::LibLoader; use zenoh::key_expr::keyexpr; -use zenoh::plugins::LibLoader; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use 
zenoh::runtime::Runtime; use zenoh::selector::Selector; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 1c03506039..d2aa2f07fe 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -123,17 +123,6 @@ pub mod core { pub use zenoh_core::SyncResolve; } -#[doc(hidden)] -pub mod internal { - pub use zenoh_core::zerror; - pub use zenoh_core::zlock; - pub use zenoh_macros::unstable; - pub use zenoh_result::bail; - pub use zenoh_sync::Condition; - pub use zenoh_util::core::ResolveFuture; - pub use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer, ZENOH_HOME_ENV_VAR}; -} - /// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate /// reading and writing data. pub mod buffers { @@ -285,12 +274,24 @@ pub mod config { }; } +#[doc(hidden)] pub mod plugins { pub use crate::api::plugins::PluginsManager; pub use crate::api::plugins::Response; pub use crate::api::plugins::RunningPlugin; pub use crate::api::plugins::{RunningPluginTrait, ZenohPlugin}; +} + +#[doc(hidden)] +pub mod internal { + pub use zenoh_core::zerror; + pub use zenoh_core::zlock; + pub use zenoh_macros::unstable; + pub use zenoh_result::bail; + pub use zenoh_sync::Condition; + pub use zenoh_util::core::ResolveFuture; pub use zenoh_util::LibLoader; + pub use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer, ZENOH_HOME_ENV_VAR}; } #[cfg(feature = "shared-memory")] From 5abc46265eaa192a990b7178ed42443e17091351 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 20:30:34 +0200 Subject: [PATCH 165/357] docs corrected --- zenoh/src/api/handlers.rs | 1 - zenoh/src/api/key_expr.rs | 31 --------------------- zenoh/src/api/liveliness.rs | 3 -- zenoh/src/api/payload.rs | 1 - zenoh/src/api/publication.rs | 1 - zenoh/src/api/query.rs | 1 - zenoh/src/api/queryable.rs | 1 - zenoh/src/api/sample.rs | 1 - zenoh/src/api/selector.rs | 2 -- zenoh/src/api/subscriber.rs | 1 - zenoh/src/api/value.rs | 1 - zenoh/src/lib.rs | 54 +++++++++++++++++++++++++++++++++--- 12 files changed, 50 insertions(+), 48 deletions(-) diff --git a/zenoh/src/api/handlers.rs b/zenoh/src/api/handlers.rs index 7610fe43d8..f17bdafca3 100644 --- a/zenoh/src/api/handlers.rs +++ b/zenoh/src/api/handlers.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // -//! Callback handler trait. use super::session::API_DATA_RECEPTION_CHANNEL_SIZE; use std::sync::{Arc, Mutex, Weak}; use zenoh_collections::RingBuffer as RingBufferInner; diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index 0eb7515181..ff174186cb 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -12,37 +12,6 @@ // ZettaScale Zenoh Team, // -//! [Key expression](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Key%20Expressions.md) are Zenoh's address space. -//! -//! In Zenoh, operations are performed on keys. To allow addressing multiple keys with a single operation, we use Key Expressions (KE). -//! KEs are a small language that express sets of keys through a glob-like language. -//! -//! These semantics can be a bit difficult to implement, so this module provides the following facilities: -//! -//! # Storing Key Expressions -//! This module provides 3 flavours to store strings that have been validated to respect the KE syntax: -//! - [`keyexpr`] is the equivalent of a [`str`], -//! - [`OwnedKeyExpr`] works like an [`std::sync::Arc`], -//! - [`KeyExpr`] works like a [`std::borrow::Cow`], but also stores some additional context internal to Zenoh to optimize -//! routing and network usage. -//! -//! 
All of these types [`Deref`](core::ops::Deref) to [`keyexpr`], which notably has methods to check whether a given [`keyexpr::intersects`] with another, -//! or even if a [`keyexpr::includes`] another. -//! -//! # Tying values to Key Expressions -//! When storing values tied to Key Expressions, you might want something more specialized than a [`HashMap`](std::collections::HashMap) if you want to respect -//! the Key Expression semantics with high performance. -//! -//! Enter [KeTrees](keyexpr_tree). These are data-structures specially built to store KE-value pairs in a manner that supports the set-semantics of KEs. -//! -//! # Building and parsing Key Expressions -//! A common issue in REST API is the association of meaning to sections of the URL, and respecting that API in a convenient manner. -//! The same issue arises naturally when designing a KE space, and [`KeFormat`](format::KeFormat) was designed to help you with this, -//! both in constructing and in parsing KEs that fit the formats you've defined. -//! -//! [`kedefine`] also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, -//! as the [`keformat`] and [`kewrite`] macros will be able to tell you if you're attempting to set fields of the format that do not exist. - use super::{ selector::Selector, session::{Session, Undeclarable}, diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index c2a075ee52..78f32efcba 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -12,9 +12,6 @@ // ZettaScale Zenoh Team, // -//! Liveliness primitives. -//! -//! see [`Liveliness`] use super::{ handlers::{locked, DefaultHandler, IntoHandler}, key_expr::KeyExpr, diff --git a/zenoh/src/api/payload.rs b/zenoh/src/api/payload.rs index ed2a58145c..a571f8433c 100644 --- a/zenoh/src/api/payload.rs +++ b/zenoh/src/api/payload.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // -//! Payload primitives. use crate::buffers::ZBuf; use std::{ borrow::Cow, convert::Infallible, fmt::Debug, ops::Deref, string::FromUtf8Error, sync::Arc, diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index ad8be76e65..8b029367de 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // -//! Publishing primitives. use super::{ builders::publication::{ PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 8b0f0c9f6c..b196f2bcaf 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // -//! Query primitives. use super::{ builders::sample::{QoSBuilderTrait, ValueBuilderTrait}, encoding::Encoding, diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index c966e02101..3e5117356a 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // -//! Queryable primitives. use super::{ builders::sample::{QoSBuilderTrait, SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}, encoding::Encoding, diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index a51264e1a4..4321ac6d6c 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // -//! 
Sample primitives use super::{ builders::sample::{QoSBuilderTrait, ValueBuilderTrait}, encoding::Encoding, diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index 144b4ee8a0..c966015721 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -12,8 +12,6 @@ // ZettaScale Zenoh Team, // -//! [Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries - use super::{key_expr::KeyExpr, queryable::Query}; use std::{ borrow::{Borrow, Cow}, diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index 7ad0160ae3..8778d9bc38 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // -//! Subscribing primitives. use super::{ handlers::{locked, Callback, DefaultHandler, IntoHandler}, key_expr::KeyExpr, diff --git a/zenoh/src/api/value.rs b/zenoh/src/api/value.rs index 60586ad040..3393b5477b 100644 --- a/zenoh/src/api/value.rs +++ b/zenoh/src/api/value.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // -//! Value primitives. use super::{builders::sample::ValueBuilderTrait, encoding::Encoding, payload::Payload}; /// A zenoh [`Value`] contains a `payload` and an [`Encoding`] that indicates how the [`Payload`] should be interpreted. diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index d2aa2f07fe..35f8920798 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -130,6 +130,36 @@ pub mod buffers { pub use zenoh_buffers::{ZBuf, ZSlice}; } +/// [Key expression](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Key%20Expressions.md) are Zenoh's address space. +/// +/// In Zenoh, operations are performed on keys. To allow addressing multiple keys with a single operation, we use Key Expressions (KE). +/// KEs are a small language that express sets of keys through a glob-like language. +/// +/// These semantics can be a bit difficult to implement, so this module provides the following facilities: +/// +/// # Storing Key Expressions +/// This module provides 3 flavours to store strings that have been validated to respect the KE syntax: +/// - [`keyexpr`] is the equivalent of a [`str`], +/// - [`OwnedKeyExpr`] works like an [`std::sync::Arc`], +/// - [`KeyExpr`] works like a [`std::borrow::Cow`], but also stores some additional context internal to Zenoh to optimize +/// routing and network usage. +/// +/// All of these types [`Deref`](core::ops::Deref) to [`keyexpr`], which notably has methods to check whether a given [`keyexpr::intersects`] with another, +/// or even if a [`keyexpr::includes`] another. +/// +/// # Tying values to Key Expressions +/// When storing values tied to Key Expressions, you might want something more specialized than a [`HashMap`](std::collections::HashMap) if you want to respect +/// the Key Expression semantics with high performance. +/// +/// Enter [KeTrees](keyexpr_tree). These are data-structures specially built to store KE-value pairs in a manner that supports the set-semantics of KEs. +/// +/// # Building and parsing Key Expressions +/// A common issue in REST API is the association of meaning to sections of the URL, and respecting that API in a convenient manner. +/// The same issue arises naturally when designing a KE space, and [`KeFormat`](format::KeFormat) was designed to help you with this, +/// both in constructing and in parsing KEs that fit the formats you've defined. 
+/// +/// [`kedefine`] also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, +/// as the [`keformat`] and [`kewrite`] macros will be able to tell you if you're attempting to set fields of the format that do not exist. pub mod key_expr { pub mod keyexpr_tree { pub use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; @@ -151,6 +181,7 @@ pub mod key_expr { } } +/// Zenoh [`Session`](crate::session::Session) and associated types pub mod session { pub use crate::api::builders::publication::SessionDeleteBuilder; pub use crate::api::builders::publication::SessionPutBuilder; @@ -162,6 +193,7 @@ pub mod session { pub use crate::api::session::SessionRef; } +/// Sample primitives pub mod sample { pub use crate::api::builders::sample::QoSBuilderTrait; pub use crate::api::builders::sample::SampleBuilder; @@ -178,14 +210,17 @@ pub mod sample { pub use crate::api::sample::SourceInfo; } +/// Value primitives pub mod value { pub use crate::api::value::Value; } +/// Encoding support pub mod encoding { pub use crate::api::encoding::Encoding; } +/// Payload primitives pub mod payload { pub use crate::api::payload::Deserialize; pub use crate::api::payload::Payload; @@ -194,6 +229,7 @@ pub mod payload { pub use crate::api::payload::StringOrBase64; } +/// [Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries pub mod selector { pub use crate::api::selector::Parameter; pub use crate::api::selector::Parameters; @@ -201,6 +237,7 @@ pub mod selector { pub use crate::api::selector::TIME_RANGE_KEY; } +/// Subscribing primitives pub mod subscriber { pub use crate::api::subscriber::FlumeSubscriber; pub use crate::api::subscriber::Subscriber; @@ -209,6 +246,7 @@ pub mod subscriber { pub use zenoh_protocol::core::Reliability; } +/// Publishing primitives pub mod publication { pub use crate::api::builders::publication::PublisherBuilder; pub use crate::api::publication::Priority; @@ -218,6 +256,7 @@ pub mod publication { pub use zenoh_protocol::core::CongestionControl; } +/// Query primitives pub mod query { pub use crate::api::query::Mode; pub use crate::api::query::Reply; @@ -228,12 +267,14 @@ pub mod query { pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; } +/// Queryable primitives pub mod queryable { pub use crate::api::queryable::Query; pub use crate::api::queryable::Queryable; pub use crate::api::queryable::QueryableBuilder; } +/// Callback handler trait pub mod handlers { pub use crate::api::handlers::locked; pub use crate::api::handlers::DefaultHandler; @@ -241,6 +282,7 @@ pub mod handlers { pub use crate::api::handlers::RingBuffer; } +/// Scouting primitives pub mod scouting { pub use crate::api::scouting::scout; pub use crate::api::scouting::ScoutBuilder; @@ -250,12 +292,14 @@ pub mod scouting { pub use zenoh_protocol::scouting::Hello; } +/// Liveliness primitives #[cfg(feature = "unstable")] pub mod liveliness { pub use crate::api::liveliness::Liveliness; pub use crate::api::liveliness::LivelinessSubscriberBuilder; } +/// Timestamp support pub mod time { pub use crate::api::time::new_reception_timestamp; pub use zenoh_protocol::core::{Timestamp, TimestampId, NTP64}; @@ -267,11 +311,13 @@ pub mod runtime { pub use zenoh_runtime::ZRuntime; } +/// Configuration to pass to [`open`](crate::session::open) and [`scout`](crate::scouting::scout) functions and associated constants pub mod config { - pub use zenoh_config::{ - client, default, peer, 
Config, EndPoint, Locator, ModeDependentValue, PermissionsConf, - PluginLoad, ValidatedMap, ZenohId, - }; + // pub use zenoh_config::{ + // client, default, peer, Config, EndPoint, Locator, ModeDependentValue, PermissionsConf, + // PluginLoad, ValidatedMap, ZenohId, + // }; + pub use zenoh_config::*; } #[doc(hidden)] From cbc8d7d32fa2ac51eef3aa427551a12786dc071d Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 21:00:03 +0200 Subject: [PATCH 166/357] clippy fixes --- .../src/replica/digest.rs | 14 +++++++------- .../tests/operations.rs | 2 +- .../zenoh-plugin-storage-manager/tests/wildcard.rs | 2 +- zenoh/src/api/selector.rs | 2 +- zenoh/src/lib.rs | 6 ++++-- 5 files changed, 14 insertions(+), 12 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs b/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs index 98faa24aa2..ef09481880 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs @@ -770,7 +770,7 @@ impl Digest { #[test] fn test_create_digest_empty_initial() { async_std::task::block_on(async { - zenoh_core::zasync_executor_init!(); + zenoh::internal::zasync_executor_init!(); }); let created = Digest::create_digest( Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), @@ -802,7 +802,7 @@ fn test_create_digest_empty_initial() { #[test] fn test_create_digest_with_initial_hot() { async_std::task::block_on(async { - zenoh_core::zasync_executor_init!(); + zenoh::internal::zasync_executor_init!(); }); let created = Digest::create_digest( Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), @@ -858,7 +858,7 @@ fn test_create_digest_with_initial_hot() { #[test] fn test_create_digest_with_initial_warm() { async_std::task::block_on(async { - zenoh_core::zasync_executor_init!(); + zenoh::internal::zasync_executor_init!(); }); let created = Digest::create_digest( Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), @@ -914,7 +914,7 @@ fn test_create_digest_with_initial_warm() { #[test] fn test_create_digest_with_initial_cold() { async_std::task::block_on(async { - zenoh_core::zasync_executor_init!(); + zenoh::internal::zasync_executor_init!(); }); let created = Digest::create_digest( Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), @@ -970,7 +970,7 @@ fn test_create_digest_with_initial_cold() { #[test] fn test_update_digest_add_content() { async_std::task::block_on(async { - zenoh_core::zasync_executor_init!(); + zenoh::internal::zasync_executor_init!(); }); let created = async_std::task::block_on(Digest::update_digest( Digest { @@ -1034,7 +1034,7 @@ fn test_update_digest_add_content() { #[test] fn test_update_digest_remove_content() { async_std::task::block_on(async { - zenoh_core::zasync_executor_init!(); + zenoh::internal::zasync_executor_init!(); }); let created = async_std::task::block_on(Digest::update_digest( Digest { @@ -1098,7 +1098,7 @@ fn test_update_digest_remove_content() { #[test] fn test_update_remove_digest() { async_std::task::block_on(async { - zenoh_core::zasync_executor_init!(); + zenoh::internal::zasync_executor_init!(); }); let created = Digest::create_digest( Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index 1def746449..77e62b2f0d 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ 
b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -22,12 +22,12 @@ use std::thread::sleep; use async_std::task; use zenoh::config::{Config, ValidatedMap}; use zenoh::core::AsyncResolve; +use zenoh::internal::zasync_executor_init; use zenoh::payload::StringOrBase64; use zenoh::query::Reply; use zenoh::sample::Sample; use zenoh::session::Session; use zenoh::time::Timestamp; -use zenoh_core::zasync_executor_init; use zenoh_plugin_trait::Plugin; async fn put_data(session: &Session, key_expr: &str, value: &str, _timestamp: Timestamp) { diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index bf1ecf707f..71decb8fee 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -23,12 +23,12 @@ use std::thread::sleep; use async_std::task; use zenoh::config::{Config, ValidatedMap}; use zenoh::core::AsyncResolve; +use zenoh::internal::zasync_executor_init; use zenoh::payload::StringOrBase64; use zenoh::query::Reply; use zenoh::sample::Sample; use zenoh::session::Session; use zenoh::time::Timestamp; -use zenoh_core::zasync_executor_init; use zenoh_plugin_trait::Plugin; async fn put_data(session: &Session, key_expr: &str, value: &str, _timestamp: Timestamp) { diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index c966015721..04d641725e 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -22,7 +22,7 @@ use std::{ }; use zenoh_protocol::core::key_expr::{keyexpr, OwnedKeyExpr}; use zenoh_result::ZResult; -pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; +use zenoh_util::time_range::TimeRange; /// A selector is the combination of a [Key Expression](crate::prelude::KeyExpr), which defines the /// set of keys that are relevant to an operation, and a set of parameters diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 35f8920798..a027a9e9ff 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -201,13 +201,13 @@ pub mod sample { pub use crate::api::builders::sample::TimestampBuilderTrait; pub use crate::api::builders::sample::ValueBuilderTrait; #[zenoh_macros::unstable] - pub use crate::api::sample::Attachment; - #[zenoh_macros::unstable] pub use crate::api::sample::Locality; pub use crate::api::sample::Sample; pub use crate::api::sample::SampleKind; #[zenoh_macros::unstable] pub use crate::api::sample::SourceInfo; + #[zenoh_macros::unstable] + pub use crate::api::sample::{Attachment, AttachmentBuilder, AttachmentIterator}; } /// Value primitives @@ -235,6 +235,7 @@ pub mod selector { pub use crate::api::selector::Parameters; pub use crate::api::selector::Selector; pub use crate::api::selector::TIME_RANGE_KEY; + pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; } /// Subscribing primitives @@ -330,6 +331,7 @@ pub mod plugins { #[doc(hidden)] pub mod internal { + pub use zenoh_core::zasync_executor_init; pub use zenoh_core::zerror; pub use zenoh_core::zlock; pub use zenoh_macros::unstable; From 5fb2531c35cf84d39241c7e3407a40982eccf006 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 21:12:21 +0200 Subject: [PATCH 167/357] result in zenoh::core --- commons/zenoh-macros/src/lib.rs | 2 +- plugins/zenoh-backend-traits/src/config.rs | 2 +- plugins/zenoh-backend-traits/src/lib.rs | 2 +- .../src/backends_mgt.rs | 2 +- plugins/zenoh-plugin-storage-manager/src/lib.rs | 2 +- .../src/memory_backend/mod.rs | 2 +- .../src/replica/storage.rs | 2 +- .../src/storages_mgt.rs | 
2 +- zenoh-ext/src/group.rs | 4 ++-- zenoh-ext/src/lib.rs | 2 +- zenoh-ext/src/publication_cache.rs | 4 ++-- zenoh-ext/src/querying_subscriber.rs | 2 +- zenoh-ext/src/session_ext.rs | 2 +- zenoh-ext/src/subscriber_ext.rs | 2 +- zenoh/src/lib.rs | 14 +++++++------- zenoh/tests/routing.rs | 2 +- zenohd/src/main.rs | 2 +- 17 files changed, 25 insertions(+), 25 deletions(-) diff --git a/commons/zenoh-macros/src/lib.rs b/commons/zenoh-macros/src/lib.rs index b77dffeba0..655747cd86 100644 --- a/commons/zenoh-macros/src/lib.rs +++ b/commons/zenoh-macros/src/lib.rs @@ -152,7 +152,7 @@ fn keformat_support(source: &str) -> proc_macro2::TokenStream { let formatter_doc = format!("And instance of a formatter for `{source}`."); quote! { - use ::zenoh::Result as ZResult; + use ::zenoh::core::Result as ZResult; const FORMAT_INNER: ::zenoh::key_expr::format::KeFormat<'static, [::zenoh::key_expr::format::Segment<'static>; #len]> = unsafe { ::zenoh::key_expr::format::macro_support::const_new(#source, [#(#segments)*]) }; diff --git a/plugins/zenoh-backend-traits/src/config.rs b/plugins/zenoh-backend-traits/src/config.rs index cfbc1566c8..096255fb59 100644 --- a/plugins/zenoh-backend-traits/src/config.rs +++ b/plugins/zenoh-backend-traits/src/config.rs @@ -17,7 +17,7 @@ use schemars::JsonSchema; use serde_json::{Map, Value}; use std::convert::TryFrom; use std::time::Duration; -use zenoh::{key_expr::keyexpr, key_expr::OwnedKeyExpr, Result as ZResult}; +use zenoh::{core::Result as ZResult, key_expr::keyexpr, key_expr::OwnedKeyExpr}; use zenoh_plugin_trait::{PluginStartArgs, StructVersion}; use zenoh_result::{bail, zerror, Error}; diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index 4340c454fa..a8910d784b 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -125,10 +125,10 @@ use async_trait::async_trait; use const_format::concatcp; +use zenoh::core::Result as ZResult; use zenoh::key_expr::{keyexpr, OwnedKeyExpr}; use zenoh::time::Timestamp; use zenoh::value::Value; -use zenoh::Result as ZResult; use zenoh_plugin_trait::{PluginControl, PluginInstance, PluginStatusRec, StructVersion}; use zenoh_util::concat_enabled_features; diff --git a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs index 3837d26dda..cae784cb16 100644 --- a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs @@ -14,8 +14,8 @@ use super::storages_mgt::*; use flume::Sender; use std::sync::Arc; +use zenoh::core::Result as ZResult; use zenoh::session::Session; -use zenoh::Result as ZResult; use zenoh_backend_traits::config::StorageConfig; use zenoh_backend_traits::{Capability, VolumeInstance}; diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 701a34f4d6..a9d610bb8e 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -27,6 +27,7 @@ use std::convert::TryFrom; use std::sync::Arc; use std::sync::Mutex; use storages_mgt::StorageMessage; +use zenoh::core::Result as ZResult; use zenoh::core::SyncResolve; use zenoh::internal::zlock; use zenoh::internal::LibLoader; @@ -35,7 +36,6 @@ use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::runtime::Runtime; use zenoh::selector::Selector; use zenoh::session::Session; -use zenoh::Result as ZResult; use zenoh_backend_traits::config::ConfigDiff; 
use zenoh_backend_traits::config::PluginConfig; use zenoh_backend_traits::config::StorageConfig; diff --git a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs index cd491ba01c..162ef8d6d0 100644 --- a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs @@ -15,10 +15,10 @@ use async_std::sync::RwLock; use async_trait::async_trait; use std::collections::HashMap; use std::sync::Arc; +use zenoh::core::Result as ZResult; use zenoh::key_expr::OwnedKeyExpr; use zenoh::time::Timestamp; use zenoh::value::Value; -use zenoh::Result as ZResult; use zenoh_backend_traits::config::{StorageConfig, VolumeConfig}; use zenoh_backend_traits::*; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin}; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index f5524c8eb5..8fbf84d0fb 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -37,7 +37,7 @@ use zenoh::sample::{SampleBuilder, ValueBuilderTrait}; use zenoh::session::SessionDeclarations; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::value::Value; -use zenoh::{session::Session, Result as ZResult}; +use zenoh::{core::Result as ZResult, session::Session}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; diff --git a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs index 8ee9eb7218..15ef063656 100644 --- a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs @@ -12,8 +12,8 @@ // ZettaScale Zenoh Team, // use async_std::sync::Arc; +use zenoh::core::Result as ZResult; use zenoh::session::Session; -use zenoh::Result as ZResult; use zenoh_backend_traits::config::StorageConfig; pub use super::replica::{Replica, StorageService}; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 90fefae638..32eefaf685 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -26,6 +26,8 @@ use std::time::{Duration, Instant}; use tokio::sync::Mutex; use tokio::task::JoinHandle; use zenoh::core::AsyncResolve; +use zenoh::core::Error as ZError; +use zenoh::core::Result as ZResult; use zenoh::internal::bail; use zenoh::internal::Condition; use zenoh::key_expr::keyexpr; @@ -38,8 +40,6 @@ use zenoh::query::ConsolidationMode; use zenoh::sample::QoSBuilderTrait; use zenoh::session::Session; use zenoh::session::SessionDeclarations; -use zenoh::Error as ZError; -use zenoh::Result as ZResult; const GROUP_PREFIX: &str = "zenoh/ext/net/group"; const EVENT_POSTFIX: &str = "evt"; diff --git a/zenoh-ext/src/lib.rs b/zenoh-ext/src/lib.rs index a59e057371..ea4dbbd6d4 100644 --- a/zenoh-ext/src/lib.rs +++ b/zenoh-ext/src/lib.rs @@ -25,7 +25,7 @@ pub use subscriber_ext::SubscriberBuilderExt; pub use subscriber_ext::SubscriberForward; use zenoh::internal::zerror; use zenoh::query::Reply; -use zenoh::{sample::Sample, Result as ZResult}; +use zenoh::{core::Result as ZResult, sample::Sample}; /// The space of keys to use in a [`FetchingSubscriber`]. 
pub enum KeySpace { diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index ac37eaeafa..3ae9ec6b5a 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -15,6 +15,7 @@ use flume::{bounded, Sender}; use std::collections::{HashMap, VecDeque}; use std::convert::TryInto; use std::future::Ready; +use zenoh::core::Error; use zenoh::core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; use zenoh::internal::ResolveFuture; use zenoh::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; @@ -24,8 +25,7 @@ use zenoh::sample::{Locality, Sample}; use zenoh::selector::Parameters; use zenoh::session::{SessionDeclarations, SessionRef}; use zenoh::subscriber::FlumeSubscriber; -use zenoh::Error; -use zenoh::{internal::bail, Result as ZResult}; +use zenoh::{core::Result as ZResult, internal::bail}; /// The builder of PublicationCache, allowing to configure it. #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 9502a3a7b2..6d0baf5d25 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -27,7 +27,7 @@ use zenoh::selector::Selector; use zenoh::session::{SessionDeclarations, SessionRef}; use zenoh::subscriber::{Reliability, Subscriber}; use zenoh::time::{new_reception_timestamp, Timestamp}; -use zenoh::{Error, Result as ZResult}; +use zenoh::{core::Error, core::Result as ZResult}; use crate::ExtractSample; diff --git a/zenoh-ext/src/session_ext.rs b/zenoh-ext/src/session_ext.rs index ab178ae70f..3f23239b29 100644 --- a/zenoh-ext/src/session_ext.rs +++ b/zenoh-ext/src/session_ext.rs @@ -15,9 +15,9 @@ use super::PublicationCacheBuilder; use std::convert::TryInto; use std::sync::Arc; use zenoh::{ + core::Error, key_expr::KeyExpr, session::{Session, SessionRef}, - Error, }; /// Some extensions to the [`zenoh::Session`](zenoh::Session) diff --git a/zenoh-ext/src/subscriber_ext.rs b/zenoh-ext/src/subscriber_ext.rs index c758f910c2..c9004bc99b 100644 --- a/zenoh-ext/src/subscriber_ext.rs +++ b/zenoh-ext/src/subscriber_ext.rs @@ -14,9 +14,9 @@ use flume::r#async::RecvStream; use futures::stream::{Forward, Map}; use std::time::Duration; +use zenoh::core::Result as ZResult; use zenoh::query::ReplyKeyExpr; use zenoh::sample::Locality; -use zenoh::Result as ZResult; use zenoh::{ liveliness::LivelinessSubscriberBuilder, query::{QueryConsolidation, QueryTarget}, diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index a027a9e9ff..17e95fb3fc 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -85,11 +85,6 @@ mod net; use git_version::git_version; use zenoh_util::concat_enabled_features; -/// A zenoh error. -pub use zenoh_result::Error; -/// A zenoh result. -pub use zenoh_result::ZResult as Result; - const GIT_VERSION: &str = git_version!(prefix = "v", cargo_prefix = "v"); pub const FEATURES: &str = concat_enabled_features!( @@ -116,11 +111,18 @@ pub const FEATURES: &str = concat_enabled_features!( pub use crate::api::session::open; +// pub mod prelude; + +/// Zenoh core types pub mod core { pub use zenoh_core::AsyncResolve; pub use zenoh_core::Resolvable; pub use zenoh_core::Resolve; pub use zenoh_core::SyncResolve; + /// A zenoh error. + pub use zenoh_result::Error; + /// A zenoh result. 
+ pub use zenoh_result::ZResult as Result; } /// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate @@ -346,5 +348,3 @@ pub mod internal { pub mod shm { pub use zenoh_shm::SharedMemoryManager; } - -// pub mod prelude; diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index c0879bdb7e..f79da05483 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -17,10 +17,10 @@ use std::sync::{atomic::AtomicUsize, Arc}; use std::time::Duration; use tokio_util::{sync::CancellationToken, task::TaskTracker}; use zenoh::config::{Config, ModeDependentValue}; +use zenoh::core::Result; use zenoh::publication::CongestionControl; use zenoh::sample::QoSBuilderTrait; use zenoh::session::{Session, SessionDeclarations}; -use zenoh::Result; use zenoh_core::ztimeout; use zenoh_core::AsyncResolve; use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher}; diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 781fc308df..ac4dc87f45 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -17,10 +17,10 @@ use git_version::git_version; use std::collections::HashSet; use zenoh::config::EndPoint; use zenoh::config::{Config, ModeDependentValue, PermissionsConf, PluginLoad, ValidatedMap}; +use zenoh::core::Result; use zenoh::plugins::PluginsManager; use zenoh::runtime::{AdminSpace, Runtime}; use zenoh::scouting::WhatAmI; -use zenoh::Result; const GIT_VERSION: &str = git_version!(prefix = "v", cargo_prefix = "v"); From 61a4b61a79d89b736c192f72b10a046cc03f4a03 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 21:54:17 +0200 Subject: [PATCH 168/357] prelude --- commons/zenoh-config/src/lib.rs | 2 +- commons/zenoh-config/src/mode_dependent.rs | 2 +- zenoh/src/lib.rs | 11 +-- zenoh/src/prelude.rs | 92 +++++++++++----------- 4 files changed, 51 insertions(+), 56 deletions(-) diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 2b5485fa6b..4843d575b0 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -35,7 +35,7 @@ use validated_struct::ValidatedMapAssociatedTypes; pub use validated_struct::{GetError, ValidatedMap}; use zenoh_core::zlock; pub use zenoh_protocol::core::{ - whatami, EndPoint, Locator, Priority, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, ZenohId, + whatami, EndPoint, Locator, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, ZenohId, }; use zenoh_protocol::{ core::{key_expr::OwnedKeyExpr, Bits}, diff --git a/commons/zenoh-config/src/mode_dependent.rs b/commons/zenoh-config/src/mode_dependent.rs index 91e366f452..9f6cc2c7e4 100644 --- a/commons/zenoh-config/src/mode_dependent.rs +++ b/commons/zenoh-config/src/mode_dependent.rs @@ -19,7 +19,7 @@ use serde::{ use std::fmt; use std::marker::PhantomData; pub use zenoh_protocol::core::{ - whatami, EndPoint, Locator, Priority, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, ZenohId, + whatami, EndPoint, Locator, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, ZenohId, }; pub trait ModeDependent { diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 17e95fb3fc..b3c88a109b 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -82,12 +82,8 @@ extern crate zenoh_result; mod api; mod net; -use git_version::git_version; -use zenoh_util::concat_enabled_features; - -const GIT_VERSION: &str = git_version!(prefix = "v", cargo_prefix = "v"); - -pub const FEATURES: &str = concat_enabled_features!( +const GIT_VERSION: &str = git_version::git_version!(prefix = "v", cargo_prefix = "v"); +pub const FEATURES: &str = 
zenoh_util::concat_enabled_features!( prefix = "zenoh", features = [ "auth_pubkey", @@ -111,7 +107,7 @@ pub const FEATURES: &str = concat_enabled_features!( pub use crate::api::session::open; -// pub mod prelude; +pub mod prelude; /// Zenoh core types pub mod core { @@ -188,6 +184,7 @@ pub mod session { pub use crate::api::builders::publication::SessionDeleteBuilder; pub use crate::api::builders::publication::SessionPutBuilder; #[zenoh_macros::unstable] + #[doc(hidden)] pub use crate::api::session::init; pub use crate::api::session::open; pub use crate::api::session::Session; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index d482ffae75..d9230aa3fe 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -16,62 +16,60 @@ //! //! This prelude is similar to the standard library's prelude in that you'll //! almost always want to import its entire contents, but unlike the standard -//! library's prelude you'll have to do so manually. An example of using this is: +//! library's prelude you'll have to do so manually. +//! +//! There are three variants of the prelude: full, sync and async. The sync one excludes the [`AsyncResolve`](crate::core::AsyncResolve) trait and the async one excludes the [`SyncResolve`](crate::core::SyncResolve) trait. +//! When specific sync or async prelude is included, the `res()` function of buildes works synchronously or asynchronously, respectively. +//! +//! If root prelude is included, the `res_sync()` or `res_async()` function of builders should be called explicitly. +//! +//! Examples: //! //! ``` -//! use zenoh::prelude::r#async::*; +//!`use zenoh::prelude::*; +//! ``` +//! ``` +//!`use zenoh::prelude::sync::*; +//! ``` +//! ``` +//!`use zenoh::prelude::r#async::*; //! ``` -// pub use common::*; -// pub(crate) mod common { -// pub use crate::api::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; -// pub use zenoh_buffers::{ -// buffer::{Buffer, SplitBuffer}, -// reader::HasReader, -// writer::HasWriter, -// }; -// pub use zenoh_core::Resolve; - -// pub use zenoh_protocol::core::{EndPoint, Locator, ZenohId}; -// #[zenoh_macros::unstable] -// pub use zenoh_protocol::core::{EntityGlobalId, EntityId}; - -// pub use crate::config::{self, Config}; -// pub use crate::handlers::IntoHandler; -// pub use crate::selector::{Parameter, Parameters, Selector}; -// pub use crate::session::{Session, SessionDeclarations}; - -// pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; - -// pub use crate::api::encoding::Encoding; -// pub use crate::api::value::Value; -/// The encoding of a zenoh `Value`. 
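Hedged usage sketch of the prelude variants documented in this patch (the API is still unstable on this branch, and the key expression and payload below are placeholders): with the sync prelude, builders resolve through `res()` backed by `SyncResolve`; importing `zenoh::prelude::r#async::*` and awaiting `res()` instead gives the asynchronous form.

    use zenoh::prelude::sync::*;

    fn main() {
        // `res()` resolves synchronously because the sync prelude brings SyncResolve into scope.
        let session = zenoh::open(Config::default()).res().unwrap();
        session.put("demo/example", "hello").res().unwrap();
    }
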
-// pub use crate::payload::{Deserialize, Payload, Serialize}; - -// #[zenoh_macros::unstable] -// pub use crate::api::sample::Locality; -// #[zenoh_macros::unstable] -// pub use crate::api::sample::SourceInfo; -// pub use crate::api::sample::{Sample, SampleKind}; -// pub use crate::api::publication::Priority; -// #[zenoh_macros::unstable] -// pub use crate::api::publication::PublisherDeclarations; -// pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; -// pub use crate::api::builders::sample::{ -// QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, -// }; +// All API types and traits in flat namespace +pub(crate) mod flat { + pub use crate::buffers::*; + pub use crate::config::*; + pub use crate::core::{AsyncResolve, Error, Resolvable, Resolve, Result, SyncResolve}; + pub use crate::encoding::*; + pub use crate::handlers::*; + pub use crate::key_expr::*; + pub use crate::payload::*; + pub use crate::plugins::*; + pub use crate::publication::*; + pub use crate::query::*; + pub use crate::queryable::*; + pub use crate::sample::*; + pub use crate::scouting::*; + pub use crate::selector::*; + pub use crate::session::*; + #[cfg(feature = "shared-memory")] + pub use crate::shm::*; + pub use crate::subscriber::*; + pub use crate::time::*; + pub use crate::value::*; +} -// #[zenoh_macros::unstable] -// pub use crate::api::builders::sample::SampleBuilderTrait; -// } +pub use crate::core::AsyncResolve; +pub use crate::core::SyncResolve; +pub use flat::*; /// Prelude to import when using Zenoh's sync API. pub mod sync { - // pub use super::common::*; - pub use zenoh_core::SyncResolve; + pub use super::flat::*; + pub use crate::core::SyncResolve; } /// Prelude to import when using Zenoh's async API. pub mod r#async { - // pub use super::common::*; - pub use zenoh_core::AsyncResolve; + pub use super::flat::*; + pub use crate::core::AsyncResolve; } From 5b0d82d560f5c9814c4f18a17e7bc8619b6b3220 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 22:05:28 +0200 Subject: [PATCH 169/357] examples updated --- examples/examples/z_delete.rs | 4 +--- examples/examples/z_formats.rs | 4 +--- examples/examples/z_forward.rs | 5 +---- examples/examples/z_get.rs | 6 +----- examples/examples/z_get_liveliness.rs | 2 -- examples/examples/z_info.rs | 5 +---- examples/examples/z_liveliness.rs | 2 -- examples/examples/z_ping.rs | 8 +------- examples/examples/z_pong.rs | 7 +------ examples/examples/z_pub.rs | 6 +----- examples/examples/z_pub_shm.rs | 3 --- examples/examples/z_pub_shm_thr.rs | 3 --- examples/examples/z_pub_thr.rs | 6 +----- examples/examples/z_pull.rs | 5 +---- examples/examples/z_put.rs | 4 +--- examples/examples/z_put_float.rs | 4 +--- examples/examples/z_queryable.rs | 5 +---- examples/examples/z_scout.rs | 5 +---- examples/examples/z_storage.rs | 6 +----- examples/examples/z_sub.rs | 5 +---- examples/examples/z_sub_liveliness.rs | 2 -- examples/examples/z_sub_thr.rs | 4 +--- zenoh-ext/examples/z_member.rs | 3 +-- zenoh-ext/examples/z_pub_cache.rs | 3 +-- zenoh-ext/examples/z_query_sub.rs | 5 +---- zenoh-ext/examples/z_view_size.rs | 3 +-- zenoh/src/prelude.rs | 2 +- 27 files changed, 22 insertions(+), 95 deletions(-) diff --git a/examples/examples/z_delete.rs b/examples/examples/z_delete.rs index f441c1b68d..6823083c51 100644 --- a/examples/examples/z_delete.rs +++ b/examples/examples/z_delete.rs @@ -12,9 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::core::AsyncResolve; -use zenoh::key_expr::KeyExpr; +use 
zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_formats.rs b/examples/examples/z_formats.rs index eab5aa035a..7d26eb7775 100644 --- a/examples/examples/z_formats.rs +++ b/examples/examples/z_formats.rs @@ -12,9 +12,7 @@ // ZettaScale Zenoh Team, // -use zenoh::key_expr::kedefine; -use zenoh::key_expr::keformat; -use zenoh::key_expr::keyexpr; +use zenoh::prelude::*; kedefine!( pub file_format: "user_id/${user_id:*}/file/${file:*/**}", diff --git a/examples/examples/z_forward.rs b/examples/examples/z_forward.rs index 06d85b3931..feb00d5ea4 100644 --- a/examples/examples/z_forward.rs +++ b/examples/examples/z_forward.rs @@ -12,10 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::core::AsyncResolve; -use zenoh::key_expr::KeyExpr; -use zenoh::session::SessionDeclarations; +use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; use zenoh_ext::SubscriberForward; diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 77304770a4..9cdc963c0c 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -13,11 +13,7 @@ // use clap::Parser; use std::time::Duration; -use zenoh::config::Config; -use zenoh::core::AsyncResolve; -use zenoh::query::QueryTarget; -use zenoh::sample::ValueBuilderTrait; -use zenoh::selector::Selector; +use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_get_liveliness.rs b/examples/examples/z_get_liveliness.rs index 5e6fd06c84..ec53c8ad8e 100644 --- a/examples/examples/z_get_liveliness.rs +++ b/examples/examples/z_get_liveliness.rs @@ -13,8 +13,6 @@ // use clap::Parser; use std::time::Duration; -use zenoh::config::Config; -use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_info.rs b/examples/examples/z_info.rs index bb81030b3a..a42f848e69 100644 --- a/examples/examples/z_info.rs +++ b/examples/examples/z_info.rs @@ -12,10 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::config::ZenohId; -use zenoh::core::AsyncResolve; -use zenoh::session::SessionDeclarations; +use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_liveliness.rs b/examples/examples/z_liveliness.rs index 2a93f50db8..c8ee8af29d 100644 --- a/examples/examples/z_liveliness.rs +++ b/examples/examples/z_liveliness.rs @@ -12,8 +12,6 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index 08cd9e8817..81c4d7141e 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -13,13 +13,7 @@ // use clap::Parser; use std::time::{Duration, Instant}; -use zenoh::config::Config; -use zenoh::core::SyncResolve; -use zenoh::key_expr::keyexpr; -use zenoh::payload::Payload; -use zenoh::publication::CongestionControl; -use zenoh::sample::QoSBuilderTrait; -use zenoh::session::SessionDeclarations; +use zenoh::prelude::sync::*; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index c3225809fa..b4ca01ef9b 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -12,12 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use 
zenoh::core::SyncResolve; -use zenoh::key_expr::keyexpr; -use zenoh::publication::CongestionControl; -use zenoh::sample::QoSBuilderTrait; -use zenoh::session::SessionDeclarations; +use zenoh::prelude::sync::*; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index a0d8edadaf..8b88b490a8 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -13,11 +13,7 @@ // use clap::Parser; use std::time::Duration; -use zenoh::config::Config; -use zenoh::core::AsyncResolve; -use zenoh::key_expr::KeyExpr; -use zenoh::sample::SampleBuilderTrait; -use zenoh::session::SessionDeclarations; +use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index bc239ebf41..52dc52cce0 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -13,10 +13,7 @@ // use clap::Parser; use std::time::Duration; -use zenoh::config::Config; -use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; -use zenoh::shm::SharedMemoryManager; use zenoh_examples::CommonArgs; const N: usize = 10; diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index c8a33f98fa..f488a23a8e 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -12,10 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; use zenoh::prelude::r#async::*; -use zenoh::publication::CongestionControl; -use zenoh::shm::SharedMemoryManager; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index fd50118022..8d6266469e 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -14,11 +14,7 @@ use clap::Parser; use std::convert::TryInto; -use zenoh::core::SyncResolve; -use zenoh::payload::Payload; -use zenoh::publication::{CongestionControl, Priority}; -use zenoh::sample::QoSBuilderTrait; -use zenoh::session::SessionDeclarations; +use zenoh::prelude::sync::*; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index b405fd331b..7c6e83e792 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -13,10 +13,7 @@ // use clap::Parser; use std::time::Duration; -use zenoh::core::AsyncResolve; -use zenoh::{ - config::Config, handlers::RingBuffer, key_expr::KeyExpr, session::SessionDeclarations, -}; +use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_put.rs b/examples/examples/z_put.rs index a2c6ac2574..af52b7d57a 100644 --- a/examples/examples/z_put.rs +++ b/examples/examples/z_put.rs @@ -12,9 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::core::AsyncResolve; -use zenoh::key_expr::KeyExpr; +use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_put_float.rs b/examples/examples/z_put_float.rs index 5fce2a5935..fb35bffc27 100644 --- a/examples/examples/z_put_float.rs +++ b/examples/examples/z_put_float.rs @@ -12,9 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::core::AsyncResolve; -use zenoh::key_expr::KeyExpr; +use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index 
49a5b946a7..511525dffd 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -12,10 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::core::AsyncResolve; -use zenoh::key_expr::KeyExpr; -use zenoh::session::SessionDeclarations; +use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_scout.rs b/examples/examples/z_scout.rs index b0d34061d3..b5d02f6e40 100644 --- a/examples/examples/z_scout.rs +++ b/examples/examples/z_scout.rs @@ -11,10 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use zenoh::config::Config; -use zenoh::core::AsyncResolve; -use zenoh::scouting::scout; -use zenoh::scouting::WhatAmI; +use zenoh::prelude::r#async::*; #[tokio::main] async fn main() { diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index ed9a2b0b89..b618c859a2 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -16,11 +16,7 @@ use clap::Parser; use futures::select; use std::collections::HashMap; -use zenoh::config::Config; -use zenoh::core::AsyncResolve; -use zenoh::key_expr::{keyexpr, KeyExpr}; -use zenoh::sample::{Sample, SampleKind}; -use zenoh::session::SessionDeclarations; +use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index ae1e7292e0..9c09f96620 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -12,10 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::core::AsyncResolve; -use zenoh::key_expr::KeyExpr; -use zenoh::session::SessionDeclarations; +use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_sub_liveliness.rs b/examples/examples/z_sub_liveliness.rs index 690299dbeb..150f3e9f99 100644 --- a/examples/examples/z_sub_liveliness.rs +++ b/examples/examples/z_sub_liveliness.rs @@ -12,8 +12,6 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::key_expr::KeyExpr; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_sub_thr.rs b/examples/examples/z_sub_thr.rs index d94ca4fa0f..11eac07c26 100644 --- a/examples/examples/z_sub_thr.rs +++ b/examples/examples/z_sub_thr.rs @@ -13,9 +13,7 @@ // use clap::Parser; use std::time::Instant; -use zenoh::config::Config; -use zenoh::core::SyncResolve; -use zenoh::session::SessionDeclarations; +use zenoh::prelude::sync::*; use zenoh_examples::CommonArgs; struct Stats { diff --git a/zenoh-ext/examples/z_member.rs b/zenoh-ext/examples/z_member.rs index 217c0d90e3..411f773edd 100644 --- a/zenoh-ext/examples/z_member.rs +++ b/zenoh-ext/examples/z_member.rs @@ -14,8 +14,7 @@ use futures::StreamExt; use std::sync::Arc; use std::time::Duration; -use zenoh::config::Config; -use zenoh::core::AsyncResolve; +use zenoh::prelude::r#async::*; use zenoh_ext::group::*; #[tokio::main] diff --git a/zenoh-ext/examples/z_pub_cache.rs b/zenoh-ext/examples/z_pub_cache.rs index 50b6d11c53..ab80994e20 100644 --- a/zenoh-ext/examples/z_pub_cache.rs +++ b/zenoh-ext/examples/z_pub_cache.rs @@ -13,8 +13,7 @@ // use clap::{arg, Command}; use std::time::Duration; -use zenoh::config::{Config, ModeDependentValue}; -use zenoh::core::AsyncResolve; +use zenoh::prelude::r#async::*; use zenoh_ext::*; #[tokio::main] diff --git a/zenoh-ext/examples/z_query_sub.rs b/zenoh-ext/examples/z_query_sub.rs index 
c3fc363069..e4e471a5d5 100644 --- a/zenoh-ext/examples/z_query_sub.rs +++ b/zenoh-ext/examples/z_query_sub.rs @@ -13,10 +13,7 @@ // use clap::arg; use clap::Command; -use zenoh::config::Config; -use zenoh::core::AsyncResolve; -use zenoh::query::ReplyKeyExpr; -use zenoh::session::SessionDeclarations; +use zenoh::prelude::r#async::*; use zenoh_ext::*; #[tokio::main] diff --git a/zenoh-ext/examples/z_view_size.rs b/zenoh-ext/examples/z_view_size.rs index 8496629646..8e5b615531 100644 --- a/zenoh-ext/examples/z_view_size.rs +++ b/zenoh-ext/examples/z_view_size.rs @@ -14,8 +14,7 @@ use clap::{arg, Command}; use std::sync::Arc; use std::time::Duration; -use zenoh::config::Config; -use zenoh::core::AsyncResolve; +use zenoh::prelude::r#async::*; use zenoh_ext::group::*; #[tokio::main] diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index d9230aa3fe..87f67fe39c 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -39,7 +39,7 @@ pub(crate) mod flat { pub use crate::buffers::*; pub use crate::config::*; - pub use crate::core::{AsyncResolve, Error, Resolvable, Resolve, Result, SyncResolve}; + pub use crate::core::{Error, Resolvable, Resolve, Result}; pub use crate::encoding::*; pub use crate::handlers::*; pub use crate::key_expr::*; From fa9b64beb89a8d44e78fe62a34af8b6fd43d9cba Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 22:07:47 +0200 Subject: [PATCH 170/357] format sample corrected --- examples/examples/z_formats.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/examples/z_formats.rs b/examples/examples/z_formats.rs index 7d26eb7775..f2698b296b 100644 --- a/examples/examples/z_formats.rs +++ b/examples/examples/z_formats.rs @@ -12,9 +12,9 @@ // ZettaScale Zenoh Team, // -use zenoh::prelude::*; +use zenoh::prelude as zenoh; -kedefine!( +zenoh::kedefine!( pub file_format: "user_id/${user_id:*}/file/${file:*/**}", pub(crate) settings_format: "user_id/${user_id:*}/settings/${setting:**}" ); @@ -23,7 +23,7 @@ fn main() { // Formatting let mut formatter = file_format::formatter(); let file = "hi/there"; - let ke = keformat!(formatter, user_id = 42, file).unwrap(); + let ke = zenoh::keformat!(formatter, user_id = 42, file).unwrap(); println!("{formatter:?} => {ke}"); // Parsing let settings_ke = keyexpr::new("user_id/30/settings/dark_mode").unwrap(); From ae723bc9d0c15739b043fdda5202219def82e05c Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 7 Apr 2024 22:18:43 +0200 Subject: [PATCH 171/357] zenoh namespace fix --- examples/examples/z_formats.rs | 6 +++--- examples/examples/z_pub_shm.rs | 2 +- zenoh/src/prelude.rs | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/examples/examples/z_formats.rs b/examples/examples/z_formats.rs index f2698b296b..aeadc8d55d 100644 --- a/examples/examples/z_formats.rs +++ b/examples/examples/z_formats.rs @@ -26,8 +26,8 @@ fn main() { let ke = zenoh::keformat!(formatter, user_id = 42, file).unwrap(); println!("{formatter:?} => {ke}"); // Parsing - let settings_ke = keyexpr::new("user_id/30/settings/dark_mode").unwrap(); + let settings_ke = zenoh::keyexpr::new("user_id/30/settings/dark_mode").unwrap(); let parsed = settings_format::parse(settings_ke).unwrap(); - assert_eq!(parsed.user_id(), keyexpr::new("30").unwrap()); - assert_eq!(parsed.setting(), keyexpr::new("dark_mode").ok()); + assert_eq!(parsed.user_id(), zenoh::keyexpr::new("30").unwrap()); + assert_eq!(parsed.setting(), zenoh::keyexpr::new("dark_mode").ok()); } diff --git a/examples/examples/z_pub_shm.rs 
b/examples/examples/z_pub_shm.rs index 52dc52cce0..3d21d386b0 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -20,7 +20,7 @@ const N: usize = 10; const K: u32 = 3; #[tokio::main] -async fn main() -> Result<(), zenoh::Error> { +async fn main() -> Result<(), ZError> { // Initiate logging env_logger::init(); diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 87f67fe39c..1fc73d31b3 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -39,7 +39,7 @@ pub(crate) mod flat { pub use crate::buffers::*; pub use crate::config::*; - pub use crate::core::{Error, Resolvable, Resolve, Result}; + pub use crate::core::{Error as ZError, Resolvable, Resolve, Result as ZResult}; pub use crate::encoding::*; pub use crate::handlers::*; pub use crate::key_expr::*; From b089e6c5261b77c2a55dd59e3364deb0e5be6431 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 8 Apr 2024 12:45:55 +0200 Subject: [PATCH 172/357] fix for doc test - reexport in prelude both flat and mods --- plugins/zenoh-backend-traits/src/lib.rs | 2 -- zenoh/src/api/builders/publication.rs | 4 --- zenoh/src/api/session.rs | 2 -- zenoh/src/lib.rs | 2 ++ zenoh/src/prelude.rs | 35 ++++++++++++++++++++++--- 5 files changed, 33 insertions(+), 12 deletions(-) diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index a8910d784b..5db79b57bd 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -30,10 +30,8 @@ //! use std::sync::Arc; //! use async_trait::async_trait; //! use zenoh::prelude::r#async::*; -//! use zenoh::time::Timestamp; //! use zenoh_backend_traits::*; //! use zenoh_backend_traits::config::*; -//! use zenoh::Result as ZResult; //! //! #[no_mangle] //! 
pub fn create_volume(config: VolumeConfig) -> ZResult> { diff --git a/zenoh/src/api/builders/publication.rs b/zenoh/src/api/builders/publication.rs index eb60021dbd..b6ebb0bad2 100644 --- a/zenoh/src/api/builders/publication.rs +++ b/zenoh/src/api/builders/publication.rs @@ -57,8 +57,6 @@ pub struct PublicationBuilderDelete; /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; -/// use zenoh::publication::CongestionControl; -/// use zenoh::sample::builder::{ValueBuilderTrait, QoSBuilderTrait}; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// session @@ -236,8 +234,6 @@ impl AsyncResolve for PublicationBuilder, PublicationBu /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; -/// use zenoh::publication::CongestionControl; -/// use zenoh::sample::builder::QoSBuilderTrait; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let publisher = session diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 136dfda1bf..8d0ae2ff66 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -480,7 +480,6 @@ impl Session { /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; - /// use zenoh::Session; /// /// let session = Session::leak(zenoh::open(config::peer()).res().await.unwrap()); /// let subscriber = session.declare_subscriber("key/expression").res().await.unwrap(); @@ -687,7 +686,6 @@ impl Session { /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; - /// use zenoh::prelude::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// session diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index b3c88a109b..016888a8fd 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -105,7 +105,9 @@ pub const FEATURES: &str = zenoh_util::concat_enabled_features!( ] ); +// Reexport some functions directly to root `zenoh::`` namespace for convenience pub use crate::api::session::open; +pub use crate::scouting::scout; pub mod prelude; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 1fc73d31b3..d204aeabdc 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -26,16 +26,16 @@ //! Examples: //! //! ``` -//!`use zenoh::prelude::*; +//!use zenoh::prelude::*; //! ``` //! ``` -//!`use zenoh::prelude::sync::*; +//!use zenoh::prelude::sync::*; //! ``` //! ``` -//!`use zenoh::prelude::r#async::*; +//!use zenoh::prelude::r#async::*; //! ``` -// All API types and traits in flat namespace +// Reexport API in flat namespace pub(crate) mod flat { pub use crate::buffers::*; pub use crate::config::*; @@ -59,17 +59,44 @@ pub(crate) mod flat { pub use crate::value::*; } +// Reexport API in hierarchical namespace +pub(crate) mod mods { + pub use crate::buffers; + pub use crate::config; + pub use crate::core; + pub use crate::encoding; + pub use crate::handlers; + pub use crate::key_expr; + pub use crate::payload; + pub use crate::plugins; + pub use crate::publication; + pub use crate::query; + pub use crate::queryable; + pub use crate::sample; + pub use crate::scouting; + pub use crate::selector; + pub use crate::session; + #[cfg(feature = "shared-memory")] + pub use crate::shm; + pub use crate::subscriber; + pub use crate::time; + pub use crate::value; +} + pub use crate::core::AsyncResolve; pub use crate::core::SyncResolve; pub use flat::*; +pub use mods::*; /// Prelude to import when using Zenoh's sync API. 
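// Illustrative sketch (hypothetical, not part of this patch): with both the flat names and the
// module hierarchy re-exported above, downstream code keeps working with a single prelude import.
// The key expression and payload below are arbitrary example values.
//
//     use zenoh::prelude::r#async::*;
//
//     async fn sketch() -> ZResult<()> {
//         let session = zenoh::open(config::peer()).res().await?; // `config` comes from the `mods` re-export
//         session.put("demo/example", "value").res().await?;      // `ZResult` comes from the `flat` re-export
//         Ok(())
//     }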
pub mod sync { pub use super::flat::*; + pub use super::mods::*; pub use crate::core::SyncResolve; } /// Prelude to import when using Zenoh's async API. pub mod r#async { pub use super::flat::*; + pub use super::mods::*; pub use crate::core::AsyncResolve; } From 2e6a7b2ddce97981a6b3c849783596804b94f91a Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 8 Apr 2024 12:55:58 +0200 Subject: [PATCH 173/357] unnecessary inports removed from doc --- zenoh/src/api/liveliness.rs | 1 - zenoh/src/api/query.rs | 1 - zenoh/src/api/queryable.rs | 1 - zenoh/src/api/scouting.rs | 9 --------- 4 files changed, 12 deletions(-) diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 78f32efcba..70b09ad738 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -584,7 +584,6 @@ where /// # async fn main() { /// # use std::convert::TryFrom; /// use zenoh::prelude::r#async::*; -/// use zenoh::query::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let tokens = session diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index b196f2bcaf..3c15b18054 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -110,7 +110,6 @@ pub(crate) struct QueryState { /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; -/// use zenoh::query::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let replies = session diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 3e5117356a..8cd6292e3d 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -647,7 +647,6 @@ impl Drop for CallbackQueryable<'_> { /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; -/// use zenoh::queryable; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let queryable = session.declare_queryable("key/expression").res().await.unwrap(); diff --git a/zenoh/src/api/scouting.rs b/zenoh/src/api/scouting.rs index c15e9955a3..a769c34e8c 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -27,7 +27,6 @@ use zenoh_result::ZResult; /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; -/// use zenoh::scouting::WhatAmI; /// /// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) /// .res() @@ -54,7 +53,6 @@ impl ScoutBuilder { /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; - /// use zenoh::scouting::WhatAmI; /// /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) /// .callback(|hello| { println!("{}", hello); }) @@ -90,7 +88,6 @@ impl ScoutBuilder { /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; - /// use zenoh::scouting::WhatAmI; /// /// let mut n = 0; /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) @@ -118,7 +115,6 @@ impl ScoutBuilder { /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; - /// use zenoh::scouting::WhatAmI; /// /// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) /// .with(flume::bounded(32)) @@ -186,7 +182,6 @@ where /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; -/// use zenoh::scouting::WhatAmI; /// /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) /// .callback(|hello| { println!("{}", hello); }) @@ -208,7 +203,6 @@ impl ScoutInner { /// # #[tokio::main] /// # async fn main() { 
/// use zenoh::prelude::r#async::*; - /// use zenoh::scouting::WhatAmI; /// /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) /// .callback(|hello| { println!("{}", hello); }) @@ -237,7 +231,6 @@ impl fmt::Debug for ScoutInner { /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; -/// use zenoh::scouting::WhatAmI; /// /// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) /// .with(flume::bounded(32)) @@ -272,7 +265,6 @@ impl Scout { /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; - /// use zenoh::scouting::WhatAmI; /// /// let scout = zenoh::scout(WhatAmI::Router, config::default()) /// .with(flume::bounded(32)) @@ -347,7 +339,6 @@ fn _scout( /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; -/// use zenoh::scouting::WhatAmI; /// /// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) /// .res() From ba41a34230792ca49eba148cdb21a5b9d117c68b Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 8 Apr 2024 16:42:37 +0200 Subject: [PATCH 174/357] shortened import statements in tests --- zenoh/src/lib.rs | 1 + zenoh/tests/attachments.rs | 13 ++----------- zenoh/tests/connection_retry.rs | 16 ++++++++++++++-- zenoh/tests/events.rs | 8 ++------ zenoh/tests/handler.rs | 13 +++---------- zenoh/tests/interceptors.rs | 6 ++---- zenoh/tests/liveliness.rs | 12 +++--------- zenoh/tests/matching.rs | 30 ++++++++++-------------------- zenoh/tests/qos.rs | 7 ++----- zenoh/tests/routing.rs | 10 ++-------- zenoh/tests/session.rs | 11 ++--------- zenoh/tests/unicity.rs | 11 ++--------- 12 files changed, 45 insertions(+), 93 deletions(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 016888a8fd..6c139e4b24 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -335,6 +335,7 @@ pub mod internal { pub use zenoh_core::zasync_executor_init; pub use zenoh_core::zerror; pub use zenoh_core::zlock; + pub use zenoh_core::ztimeout; pub use zenoh_macros::unstable; pub use zenoh_result::bail; pub use zenoh_sync::Condition; diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 41c8d85dd7..ef8b70f772 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -14,10 +14,7 @@ #[cfg(feature = "unstable")] #[test] fn pubsub() { - use zenoh::{sample::SampleBuilderTrait, session::SessionDeclarations}; - use zenoh_config::Config; - use zenoh_core::SyncResolve; - + use zenoh::prelude::sync::*; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh .declare_subscriber("test/attachment") @@ -63,13 +60,7 @@ fn pubsub() { #[cfg(feature = "unstable")] #[test] fn queries() { - use zenoh::{ - sample::{Attachment, SampleBuilderTrait, ValueBuilderTrait}, - session::SessionDeclarations, - }; - use zenoh_config::Config; - use zenoh_core::SyncResolve; - + use zenoh::prelude::sync::*; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh .declare_queryable("test/attachment") diff --git a/zenoh/tests/connection_retry.rs b/zenoh/tests/connection_retry.rs index f510e4f54a..d99017ff43 100644 --- a/zenoh/tests/connection_retry.rs +++ b/zenoh/tests/connection_retry.rs @@ -1,5 +1,17 @@ -use zenoh_config::{Config, ConnectionRetryConf, EndPoint, ValidatedMap}; -use zenoh_core::SyncResolve; +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at 
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use zenoh::prelude::sync::*; #[test] fn retry_config_overriding() { diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index 3069e53e24..b659b462df 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -12,12 +12,8 @@ // ZettaScale Zenoh Team, // use std::time::Duration; -use zenoh::query::Reply; -use zenoh::sample::SampleKind; -use zenoh::session::{Session, SessionDeclarations}; -use zenoh_config::peer; -use zenoh_core::ztimeout; -use zenoh_core::AsyncResolve; +use zenoh::prelude::r#async::*; +use zenoh::internal::ztimeout; const TIMEOUT: Duration = Duration::from_secs(10); diff --git a/zenoh/tests/handler.rs b/zenoh/tests/handler.rs index 82030daef5..4f8be094d2 100644 --- a/zenoh/tests/handler.rs +++ b/zenoh/tests/handler.rs @@ -1,6 +1,3 @@ -use zenoh::{sample::ValueBuilderTrait, session::SessionDeclarations}; -use zenoh_config::Config; - // // Copyright (c) 2024 ZettaScale Technology // @@ -14,12 +11,11 @@ use zenoh_config::Config; // Contributors: // ZettaScale Zenoh Team, // +use std::{thread, time::Duration}; +use zenoh::prelude::sync::*; + #[test] fn pubsub_with_ringbuffer() { - use std::{thread, time::Duration}; - use zenoh::handlers::RingBuffer; - use zenoh_core::SyncResolve; - let zenoh = zenoh::open(Config::default()).res().unwrap(); let sub = zenoh .declare_subscriber("test/ringbuffer") @@ -50,9 +46,6 @@ fn pubsub_with_ringbuffer() { #[test] fn query_with_ringbuffer() { - use zenoh::handlers::RingBuffer; - use zenoh_core::SyncResolve; - let zenoh = zenoh::open(Config::default()).res().unwrap(); let queryable = zenoh .declare_queryable("test/ringbuffer_query") diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index c20dcafdb7..036dcd8e2a 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -12,10 +12,8 @@ // ZettaScale Zenoh Team, // use std::sync::{Arc, Mutex}; -use zenoh::session::SessionDeclarations; -use zenoh_config::{Config, ValidatedMap}; -use zenoh_core::zlock; -use zenoh_core::SyncResolve; +use zenoh::internal::zlock; +use zenoh::prelude::sync::*; struct IntervalCounter { first_tick: bool, diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index 43dfd37281..79f0e277be 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -11,18 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::time::Duration; -use zenoh_config as config; -use zenoh_core::ztimeout; -use zenoh_core::AsyncResolve; - -const TIMEOUT: Duration = Duration::from_secs(60); -const SLEEP: Duration = Duration::from_secs(1); - #[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_liveliness() { - use zenoh::{sample::SampleKind, session::SessionDeclarations}; + use {std::time::Duration, zenoh::internal::ztimeout, zenoh::prelude::*}; + const TIMEOUT: Duration = Duration::from_secs(60); + const SLEEP: Duration = Duration::from_secs(1); let mut c1 = config::peer(); c1.listen diff --git a/zenoh/tests/matching.rs b/zenoh/tests/matching.rs index 341f66bba7..4e838f98a1 100644 --- a/zenoh/tests/matching.rs +++ b/zenoh/tests/matching.rs @@ -11,17 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::str::FromStr; -use std::time::Duration; -use zenoh::config::Locator; -use zenoh::session::Session; 
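// Illustrative sketch (hypothetical, not part of this patch): the import pattern these test
// updates converge on, using the `zenoh::internal::ztimeout` re-export added in lib.rs above.
// As in the surrounding tests, this assumes `ztimeout!` expands against a `TIMEOUT` constant
// in scope; the 60-second value is simply the one the tests use.
//
//     use std::time::Duration;
//     use zenoh::internal::ztimeout;
//     use zenoh::prelude::r#async::*;
//
//     const TIMEOUT: Duration = Duration::from_secs(60);
//
//     async fn open_peer() -> Session {
//         ztimeout!(zenoh::open(config::peer()).res_async()).unwrap()
//     }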
-use zenoh_config as config; -use zenoh_config::peer; -use zenoh_core::ztimeout; -use zenoh_core::AsyncResolve; -use zenoh_result::ZResult as Result; +#[cfg(feature = "unstable")] +use { + flume::RecvTimeoutError, std::str::FromStr, std::time::Duration, zenoh::internal::ztimeout, + zenoh::prelude::*, +}; +#[cfg(feature = "unstable")] const TIMEOUT: Duration = Duration::from_secs(60); +#[cfg(feature = "unstable")] const RECV_TIMEOUT: Duration = Duration::from_secs(1); #[cfg(feature = "unstable")] @@ -44,10 +42,7 @@ async fn create_session_pair(locator: &str) -> (Session, Session) { #[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] -async fn zenoh_matching_status_any() -> Result<()> { - use flume::RecvTimeoutError; - use zenoh::{sample::Locality, session::SessionDeclarations}; - +async fn zenoh_matching_status_any() -> ZResult<()> { let (session1, session2) = create_session_pair("tcp/127.0.0.1:18001").await; let publisher1 = ztimeout!(session1 @@ -106,9 +101,7 @@ async fn zenoh_matching_status_any() -> Result<()> { #[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] -async fn zenoh_matching_status_remote() -> Result<()> { - use flume::RecvTimeoutError; - use zenoh::{sample::Locality, session::SessionDeclarations}; +async fn zenoh_matching_status_remote() -> ZResult<()> { let session1 = ztimeout!(zenoh::open(peer()).res_async()).unwrap(); let session2 = ztimeout!(zenoh::open(peer()).res_async()).unwrap(); @@ -170,10 +163,7 @@ async fn zenoh_matching_status_remote() -> Result<()> { #[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] -async fn zenoh_matching_status_local() -> Result<()> { - use flume::RecvTimeoutError; - use zenoh::{sample::Locality, session::SessionDeclarations}; - +async fn zenoh_matching_status_local() -> ZResult<()> { let session1 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); let session2 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 5e3f507006..3c0cfb0b37 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -12,11 +12,8 @@ // ZettaScale Zenoh Team, // use std::time::Duration; -use zenoh::publication::CongestionControl; -use zenoh::sample::QoSBuilderTrait; -use zenoh::{publication::Priority, session::SessionDeclarations}; -use zenoh_core::ztimeout; -use zenoh_core::AsyncResolve; +use zenoh::internal::ztimeout; +use zenoh::prelude::r#async::*; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index f79da05483..07af6a8840 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -16,15 +16,9 @@ use std::sync::atomic::Ordering; use std::sync::{atomic::AtomicUsize, Arc}; use std::time::Duration; use tokio_util::{sync::CancellationToken, task::TaskTracker}; -use zenoh::config::{Config, ModeDependentValue}; +use zenoh::internal::{bail, ztimeout}; +use zenoh::prelude::r#async::*; use zenoh::core::Result; -use zenoh::publication::CongestionControl; -use zenoh::sample::QoSBuilderTrait; -use zenoh::session::{Session, SessionDeclarations}; -use zenoh_core::ztimeout; -use zenoh_core::AsyncResolve; -use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher}; -use zenoh_result::bail; const TIMEOUT: Duration = Duration::from_secs(10); const MSG_COUNT: usize = 50; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index b325e7601b..724d48eef1 100644 --- 
a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -14,15 +14,8 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; -use zenoh::key_expr::KeyExpr; -use zenoh::publication::CongestionControl; -use zenoh::sample::{QoSBuilderTrait, SampleKind}; -use zenoh::session::{Session, SessionDeclarations}; -use zenoh::subscriber::Reliability; -use zenoh::value::Value; -use zenoh_config as config; -use zenoh_core::ztimeout; -use zenoh_core::AsyncResolve; +use zenoh::internal::ztimeout; +use zenoh::prelude::r#async::*; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index 8ad80b7315..d54186050c 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -11,19 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use config::WhatAmI; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; use tokio::runtime::Handle; -use zenoh::config::EndPoint; -use zenoh::key_expr::KeyExpr; -use zenoh::publication::CongestionControl; -use zenoh::sample::QoSBuilderTrait; -use zenoh::session::{Session, SessionDeclarations}; -use zenoh_config as config; -use zenoh_core::ztimeout; -use zenoh_core::AsyncResolve; +use zenoh::internal::ztimeout; +use zenoh::prelude::r#async::*; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); From 182eb3a7d9a0422f6d67a1bf0696468d1a486b9a Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 8 Apr 2024 17:54:12 +0200 Subject: [PATCH 175/357] QoS type leak fixed --- zenoh/src/api/builders/sample.rs | 2 +- zenoh/src/api/sample.rs | 23 ++++++++++++++++------- zenoh/src/lib.rs | 4 ++-- zenoh/src/prelude.rs | 2 -- zenoh/tests/qos.rs | 10 ++++------ 5 files changed, 23 insertions(+), 18 deletions(-) diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs index 2af1a0a71c..ee0f716800 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -145,7 +145,7 @@ impl SampleBuilder { } // Allows to change qos as a whole of [`Sample`] - pub fn qos(self, qos: QoS) -> Self { + pub(crate) fn qos(self, qos: QoS) -> Self { Self { sample: Sample { qos, ..self.sample }, _t: PhantomData::, diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 4321ac6d6c..f2ff96fb04 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -546,16 +546,25 @@ impl Sample { &self.encoding } - /// Gets the timestamp of this Sample. + /// Gets the timestamp of this Sample #[inline] pub fn timestamp(&self) -> Option<&Timestamp> { self.timestamp.as_ref() } - /// Gets the quality of service settings this Sample was sent with. - #[inline] - pub fn qos(&self) -> &QoS { - &self.qos + /// Gets the congetion control of this Sample + pub fn congestion_control(&self) -> CongestionControl { + self.qos.congestion_control() + } + + /// Gets the priority of this Sample + pub fn priority(&self) -> Priority { + self.qos.priority() + } + + /// Gets the express flag value. If `true`, the message is not batched during transmission, in order to reduce latency. + pub fn express(&self) -> bool { + self.qos.express() } /// Gets infos on the source of this Sample. 
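// Illustrative sketch (hypothetical, not part of this patch): with `qos()` replaced above,
// consumer code reads per-sample QoS through the new `priority()`, `congestion_control()`
// and `express()` accessors. The key expression and session config are arbitrary example values.

use zenoh::prelude::r#async::*;

#[tokio::main]
async fn main() {
    let session = zenoh::open(config::peer()).res().await.unwrap();
    let subscriber = session
        .declare_subscriber("key/expression")
        .res()
        .await
        .unwrap();
    while let Ok(sample) = subscriber.recv_async().await {
        // QoS settings are now exposed directly on the received sample.
        println!(
            "priority={:?} congestion_control={:?} express={}",
            sample.priority(),
            sample.congestion_control(),
            sample.express()
        );
    }
}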
@@ -581,12 +590,12 @@ impl From for Value { /// Structure containing quality of service data #[derive(Debug, Default, Copy, Clone, Eq, PartialEq)] -pub struct QoS { +pub(crate) struct QoS { inner: QoSType, } #[derive(Debug)] -pub struct QoSBuilder(QoS); +pub(crate) struct QoSBuilder(QoS); impl From for QoSBuilder { fn from(qos: QoS) -> Self { diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 6c139e4b24..f071360567 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -105,9 +105,9 @@ pub const FEATURES: &str = zenoh_util::concat_enabled_features!( ] ); -// Reexport some functions directly to root `zenoh::`` namespace for convenience +// Expose some functions directly to root `zenoh::`` namespace for convenience pub use crate::api::session::open; -pub use crate::scouting::scout; +pub use crate::api::scouting::scout; pub mod prelude; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index d204aeabdc..e89542122d 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -44,7 +44,6 @@ pub(crate) mod flat { pub use crate::handlers::*; pub use crate::key_expr::*; pub use crate::payload::*; - pub use crate::plugins::*; pub use crate::publication::*; pub use crate::query::*; pub use crate::queryable::*; @@ -68,7 +67,6 @@ pub(crate) mod mods { pub use crate::handlers; pub use crate::key_expr; pub use crate::payload; - pub use crate::plugins; pub use crate::publication; pub use crate::query; pub use crate::queryable; diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 3c0cfb0b37..b70d01ec79 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -42,15 +42,13 @@ async fn pubsub() { ztimeout!(publisher1.put("qos").res_async()).unwrap(); let sample = ztimeout!(subscriber.recv_async()).unwrap(); - let qos = sample.qos(); - assert_eq!(qos.priority(), Priority::DataHigh); - assert_eq!(qos.congestion_control(), CongestionControl::Drop); + assert_eq!(sample.priority(), Priority::DataHigh); + assert_eq!(sample.congestion_control(), CongestionControl::Drop); ztimeout!(publisher2.put("qos").res_async()).unwrap(); let sample = ztimeout!(subscriber.recv_async()).unwrap(); - let qos = sample.qos(); - assert_eq!(qos.priority(), Priority::DataLow); - assert_eq!(qos.congestion_control(), CongestionControl::Block); + assert_eq!(sample.priority(), Priority::DataLow); + assert_eq!(sample.congestion_control(), CongestionControl::Block); } From 5aedd2c09219073895ab1fcd105f2fb2b05a5d86 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 8 Apr 2024 18:01:43 +0200 Subject: [PATCH 176/357] Payload tuple generic impl --- Cargo.lock | 7 + Cargo.toml | 1 + zenoh/Cargo.toml | 1 + zenoh/src/payload.rs | 1143 ++++++++++++++++++++++++++++++++++++--- zenoh/src/queryable.rs | 1 + zenoh/src/session.rs | 13 +- zenoh/src/subscriber.rs | 3 - 7 files changed, 1093 insertions(+), 76 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d3ea8978b5..3f74af9ed1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3954,6 +3954,12 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" +[[package]] +name = "unwrap-infallible" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "151ac09978d3c2862c4e39b557f4eceee2cc72150bc4cb4f16abf061b6e381fb" + [[package]] name = "unzip-n" version = "0.1.2" @@ -4479,6 +4485,7 @@ dependencies = [ "tokio", "tokio-util", "uhlc", + "unwrap-infallible", "uuid", "vec_map", "zenoh-buffers", diff --git a/Cargo.toml b/Cargo.toml index 
9210c96b70..d02f84eca8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -159,6 +159,7 @@ tokio-rustls = "0.25.0" console-subscriber = "0.2" typenum = "1.16.0" uhlc = { version = "0.7.0", default-features = false } # Default features are disabled due to usage in no_std crates +unwrap-infallible = "0.1.5" unzip-n = "0.1.2" url = "2.3.1" urlencoding = "2.1.2" diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index d20a4b914e..80cf8ba1bc 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -90,6 +90,7 @@ serde_yaml = { workspace = true } socket2 = { workspace = true } stop-token = { workspace = true } uhlc = { workspace = true, features = ["default"] } +unwrap-infallible = { workspace = true } uuid = { workspace = true, features = ["default"] } vec_map = { workspace = true } zenoh-buffers = { workspace = true, features = ["std"] } diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index ed2a58145c..db3126d93d 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -14,17 +14,38 @@ //! Payload primitives. use crate::buffers::ZBuf; +use std::marker::PhantomData; use std::{ borrow::Cow, convert::Infallible, fmt::Debug, ops::Deref, string::FromUtf8Error, sync::Arc, }; -use zenoh_buffers::buffer::Buffer; +use unwrap_infallible::UnwrapInfallible; use zenoh_buffers::{ - buffer::SplitBuffer, reader::HasReader, writer::HasWriter, ZBufReader, ZSlice, + buffer::{Buffer, SplitBuffer}, + reader::{HasReader, Reader}, + writer::HasWriter, + ZBufReader, ZSlice, }; -use zenoh_result::ZResult; +use zenoh_codec::{RCodec, WCodec, Zenoh080}; +use zenoh_result::{ZError, ZResult}; #[cfg(feature = "shared-memory")] use zenoh_shm::SharedMemoryBuf; +/// Trait to encode a type `T` into a [`Value`]. +pub trait Serialize { + type Output; + + /// The implementer should take care of serializing the type `T` and set the proper [`Encoding`]. + fn serialize(self, t: T) -> Self::Output; +} + +pub trait Deserialize<'a, T> { + type Error; + + /// The implementer should take care of deserializing the type `T` based on the [`Encoding`] information. + fn deserialize(self, t: &'a Payload) -> Result; +} + +/// A payload contains the serialized bytes of user data. #[repr(transparent)] #[derive(Clone, Debug, Default, PartialEq, Eq)] pub struct Payload(ZBuf); @@ -57,14 +78,17 @@ impl Payload { pub fn reader(&self) -> PayloadReader<'_> { PayloadReader(self.0.reader()) } -} - -/// A reader that implements [`std::io::Read`] trait to read from a [`Payload`]. -pub struct PayloadReader<'a>(ZBufReader<'a>); -impl std::io::Read for PayloadReader<'_> { - fn read(&mut self, buf: &mut [u8]) -> std::io::Result { - self.0.read(buf) + /// Get a [`PayloadReader`] implementing [`std::io::Read`] trait. + pub fn iter(&self) -> PayloadIterator<'_, T> + where + T: TryFrom, + ZSerde: for<'b> Deserialize<'b, T, Error = ZDeserializeError>, + { + PayloadIterator { + reader: self.0.reader(), + _t: PhantomData::, + } } } @@ -99,19 +123,45 @@ impl Payload { } } -/// Trait to encode a type `T` into a [`Value`]. -pub trait Serialize { - type Output; +/// A reader that implements [`std::io::Read`] trait to read from a [`Payload`]. +pub struct PayloadReader<'a>(ZBufReader<'a>); - /// The implementer should take care of serializing the type `T` and set the proper [`Encoding`]. 
- fn serialize(self, t: T) -> Self::Output; +impl std::io::Read for PayloadReader<'_> { + fn read(&mut self, buf: &mut [u8]) -> std::io::Result { + std::io::Read::read(&mut self.0, buf) + } } -pub trait Deserialize<'a, T> { - type Error; +/// An iterator that implements [`std::iter::Iterator`] trait to iterate on values `T` in a [`Payload`]. +/// Note that [`Payload`] contains a serialized version of `T` and iterating over a [`Payload`] performs lazy deserialization. +pub struct PayloadIterator<'a, T> +where + ZSerde: Deserialize<'a, T>, +{ + reader: ZBufReader<'a>, + _t: PhantomData, +} - /// The implementer should take care of deserializing the type `T` based on the [`Encoding`] information. - fn deserialize(self, t: &'a Payload) -> Result; +impl<'a, T> Iterator for PayloadIterator<'a, T> +where + ZSerde: for<'b> Deserialize<'b, T, Error = ZDeserializeError>, +{ + type Item = T; + + fn next(&mut self) -> Option { + let codec = Zenoh080::new(); + + let kbuf: ZBuf = codec.read(&mut self.reader).ok()?; + let kpld = Payload::new(kbuf); + + let t = ZSerde.deserialize(&kpld).ok()?; + Some(t) + } + + fn size_hint(&self) -> (usize, Option) { + let remaining = self.reader.remaining(); + (remaining, Some(remaining)) + } } /// The default serializer for Zenoh payload. It supports primitives types, such as: vec, int, uint, float, string, bool. @@ -122,7 +172,7 @@ pub struct ZSerde; #[derive(Debug, Clone, Copy)] pub struct ZDeserializeError; -// Bytes +// ZBuf impl Serialize for ZSerde { type Output = Payload; @@ -131,9 +181,9 @@ impl Serialize for ZSerde { } } -impl From for ZBuf { - fn from(value: Payload) -> Self { - value.0 +impl From for Payload { + fn from(t: ZBuf) -> Self { + ZSerde.serialize(t) } } @@ -141,16 +191,23 @@ impl Deserialize<'_, ZBuf> for ZSerde { type Error = Infallible; fn deserialize(self, v: &Payload) -> Result { - Ok(v.into()) + Ok(v.0.clone()) + } +} + +impl From for ZBuf { + fn from(value: Payload) -> Self { + value.0 } } impl From<&Payload> for ZBuf { fn from(value: &Payload) -> Self { - value.0.clone() + ZSerde.deserialize(value).unwrap_infallible() } } +// Vec impl Serialize> for ZSerde { type Output = Payload; @@ -159,11 +216,9 @@ impl Serialize> for ZSerde { } } -impl Serialize<&[u8]> for ZSerde { - type Output = Payload; - - fn serialize(self, t: &[u8]) -> Self::Output { - Payload::new(t.to_vec()) +impl From> for Payload { + fn from(t: Vec) -> Self { + ZSerde.serialize(t) } } @@ -171,16 +226,38 @@ impl Deserialize<'_, Vec> for ZSerde { type Error = Infallible; fn deserialize(self, v: &Payload) -> Result, Self::Error> { - Ok(Vec::from(v)) + Ok(v.0.contiguous().to_vec()) + } +} + +impl From for Vec { + fn from(value: Payload) -> Self { + ZSerde.deserialize(&value).unwrap_infallible() } } impl From<&Payload> for Vec { fn from(value: &Payload) -> Self { - Cow::from(value).to_vec() + ZSerde.deserialize(value).unwrap_infallible() } } +// &[u8] +impl Serialize<&[u8]> for ZSerde { + type Output = Payload; + + fn serialize(self, t: &[u8]) -> Self::Output { + Payload::new(t.to_vec()) + } +} + +impl From<&[u8]> for Payload { + fn from(t: &[u8]) -> Self { + ZSerde.serialize(t) + } +} + +// Cow<[u8]> impl<'a> Serialize> for ZSerde { type Output = Payload; @@ -189,6 +266,12 @@ impl<'a> Serialize> for ZSerde { } } +impl From> for Payload { + fn from(t: Cow<'_, [u8]>) -> Self { + ZSerde.serialize(t) + } +} + impl<'a> Deserialize<'a, Cow<'a, [u8]>> for ZSerde { type Error = Infallible; @@ -199,7 +282,7 @@ impl<'a> Deserialize<'a, Cow<'a, [u8]>> for ZSerde { impl<'a> From<&'a Payload> 
for Cow<'a, [u8]> { fn from(value: &'a Payload) -> Self { - value.0.contiguous() + ZSerde.deserialize(value).unwrap_infallible() } } @@ -212,11 +295,9 @@ impl Serialize for ZSerde { } } -impl Serialize<&str> for ZSerde { - type Output = Payload; - - fn serialize(self, s: &str) -> Self::Output { - Self.serialize(s.to_string()) +impl From for Payload { + fn from(t: String) -> Self { + ZSerde.serialize(t) } } @@ -224,7 +305,16 @@ impl Deserialize<'_, String> for ZSerde { type Error = FromUtf8Error; fn deserialize(self, v: &Payload) -> Result { - String::from_utf8(Vec::from(v)) + let v: Vec = ZSerde.deserialize(v).unwrap_infallible(); + String::from_utf8(v) + } +} + +impl TryFrom for String { + type Error = FromUtf8Error; + + fn try_from(value: Payload) -> Result { + ZSerde.deserialize(&value) } } @@ -236,14 +326,22 @@ impl TryFrom<&Payload> for String { } } -impl TryFrom for String { - type Error = FromUtf8Error; +// &str +impl Serialize<&str> for ZSerde { + type Output = Payload; - fn try_from(value: Payload) -> Result { - ZSerde.deserialize(&value) + fn serialize(self, s: &str) -> Self::Output { + Self.serialize(s.to_string()) } } +impl From<&str> for Payload { + fn from(t: &str) -> Self { + ZSerde.serialize(t) + } +} + +// Cow impl<'a> Serialize> for ZSerde { type Output = Payload; @@ -252,6 +350,12 @@ impl<'a> Serialize> for ZSerde { } } +impl From> for Payload { + fn from(t: Cow<'_, str>) -> Self { + ZSerde.serialize(t) + } +} + impl<'a> Deserialize<'a, Cow<'a, str>> for ZSerde { type Error = FromUtf8Error; @@ -277,7 +381,11 @@ macro_rules! impl_int { fn serialize(self, t: $t) -> Self::Output { let bs = t.to_le_bytes(); - let end = 1 + bs.iter().rposition(|b| *b != 0).unwrap_or(bs.len() - 1); + let end = if t == 0 as $t { + 0 + } else { + 1 + bs.iter().rposition(|b| *b != 0).unwrap_or(bs.len() - 1) + }; // SAFETY: // - 0 is a valid start index because bs is guaranteed to always have a length greater or equal than 1 // - end is a valid end index because is bounded between 0 and bs.len() @@ -285,6 +393,12 @@ macro_rules! impl_int { } } + impl From<$t> for Payload { + fn from(t: $t) -> Self { + ZSerde.serialize(t) + } + } + impl Serialize<&$t> for ZSerde { type Output = Payload; @@ -293,11 +407,23 @@ macro_rules! impl_int { } } + impl From<&$t> for Payload { + fn from(t: &$t) -> Self { + ZSerde.serialize(t) + } + } + impl Serialize<&mut $t> for ZSerde { type Output = Payload; fn serialize(self, t: &mut $t) -> Self::Output { - Self.serialize(*t) + ZSerde.serialize(*t) + } + } + + impl From<&mut $t> for Payload { + fn from(t: &mut $t) -> Self { + ZSerde.serialize(t) } } @@ -319,6 +445,14 @@ macro_rules! impl_int { } } + impl TryFrom for $t { + type Error = ZDeserializeError; + + fn try_from(value: Payload) -> Result { + ZSerde.deserialize(&value) + } + } + impl TryFrom<&Payload> for $t { type Error = ZDeserializeError; @@ -349,12 +483,18 @@ impl_int!(f64, ZSerde::ZENOH_FLOAT); // Zenoh bool impl Serialize for ZSerde { - type Output = ZBuf; + type Output = Payload; fn serialize(self, t: bool) -> Self::Output { // SAFETY: casting a bool into an integer is well-defined behaviour. 
// 0 is false, 1 is true: https://doc.rust-lang.org/std/primitive.bool.html - ZBuf::from((t as u8).to_le_bytes()) + Payload::new(ZBuf::from((t as u8).to_le_bytes())) + } +} + +impl From for Payload { + fn from(t: bool) -> Self { + ZSerde.serialize(t) } } @@ -391,6 +531,14 @@ impl Serialize<&serde_json::Value> for ZSerde { } } +impl TryFrom<&serde_json::Value> for Payload { + type Error = serde_json::Error; + + fn try_from(value: &serde_json::Value) -> Result { + ZSerde.serialize(value) + } +} + impl Serialize for ZSerde { type Output = Result; @@ -399,6 +547,14 @@ impl Serialize for ZSerde { } } +impl TryFrom for Payload { + type Error = serde_json::Error; + + fn try_from(value: serde_json::Value) -> Result { + ZSerde.serialize(value) + } +} + impl Deserialize<'_, serde_json::Value> for ZSerde { type Error = serde_json::Error; @@ -407,11 +563,11 @@ impl Deserialize<'_, serde_json::Value> for ZSerde { } } -impl TryFrom for Payload { +impl TryFrom<&Payload> for serde_json::Value { type Error = serde_json::Error; - fn try_from(value: serde_json::Value) -> Result { - ZSerde.serialize(value) + fn try_from(value: &Payload) -> Result { + ZSerde.deserialize(value) } } @@ -426,6 +582,14 @@ impl Serialize<&serde_yaml::Value> for ZSerde { } } +impl TryFrom<&serde_yaml::Value> for Payload { + type Error = serde_yaml::Error; + + fn try_from(value: &serde_yaml::Value) -> Result { + ZSerde.serialize(value) + } +} + impl Serialize for ZSerde { type Output = Result; @@ -434,6 +598,14 @@ impl Serialize for ZSerde { } } +impl TryFrom for Payload { + type Error = serde_yaml::Error; + + fn try_from(value: serde_yaml::Value) -> Result { + ZSerde.serialize(value) + } +} + impl Deserialize<'_, serde_yaml::Value> for ZSerde { type Error = serde_yaml::Error; @@ -442,11 +614,11 @@ impl Deserialize<'_, serde_yaml::Value> for ZSerde { } } -impl TryFrom for Payload { +impl TryFrom<&Payload> for serde_yaml::Value { type Error = serde_yaml::Error; - fn try_from(value: serde_yaml::Value) -> Result { - ZSerde.serialize(value) + fn try_from(value: &Payload) -> Result { + ZSerde.deserialize(value) } } @@ -461,6 +633,14 @@ impl Serialize<&serde_cbor::Value> for ZSerde { } } +impl TryFrom<&serde_cbor::Value> for Payload { + type Error = serde_cbor::Error; + + fn try_from(value: &serde_cbor::Value) -> Result { + ZSerde.serialize(value) + } +} + impl Serialize for ZSerde { type Output = Result; @@ -469,6 +649,14 @@ impl Serialize for ZSerde { } } +impl TryFrom for Payload { + type Error = serde_cbor::Error; + + fn try_from(value: serde_cbor::Value) -> Result { + ZSerde.serialize(value) + } +} + impl Deserialize<'_, serde_cbor::Value> for ZSerde { type Error = serde_cbor::Error; @@ -477,11 +665,11 @@ impl Deserialize<'_, serde_cbor::Value> for ZSerde { } } -impl TryFrom for Payload { +impl TryFrom<&Payload> for serde_cbor::Value { type Error = serde_cbor::Error; - fn try_from(value: serde_cbor::Value) -> Result { - ZSerde.serialize(value) + fn try_from(value: &Payload) -> Result { + ZSerde.deserialize(value) } } @@ -500,6 +688,14 @@ impl Serialize<&serde_pickle::Value> for ZSerde { } } +impl TryFrom<&serde_pickle::Value> for Payload { + type Error = serde_pickle::Error; + + fn try_from(value: &serde_pickle::Value) -> Result { + ZSerde.serialize(value) + } +} + impl Serialize for ZSerde { type Output = Result; @@ -508,6 +704,14 @@ impl Serialize for ZSerde { } } +impl TryFrom for Payload { + type Error = serde_pickle::Error; + + fn try_from(value: serde_pickle::Value) -> Result { + ZSerde.serialize(value) + } +} + impl 
Deserialize<'_, serde_pickle::Value> for ZSerde { type Error = serde_pickle::Error; @@ -516,11 +720,11 @@ impl Deserialize<'_, serde_pickle::Value> for ZSerde { } } -impl TryFrom for Payload { +impl TryFrom<&Payload> for serde_pickle::Value { type Error = serde_pickle::Error; - fn try_from(value: serde_pickle::Value) -> Result { - ZSerde.serialize(value) + fn try_from(value: &Payload) -> Result { + ZSerde.deserialize(value) } } @@ -553,15 +757,86 @@ impl Serialize for ZSerde { } } -impl From for Payload +// Tuple +impl Serialize<(A, B)> for ZSerde where - ZSerde: Serialize, + A: Into, + B: Into, { - fn from(t: T) -> Self { - ZSerde.serialize(t) + type Output = Payload; + + fn serialize(self, t: (A, B)) -> Self::Output { + let (a, b) = t; + + let codec = Zenoh080::new(); + let mut buffer: ZBuf = ZBuf::empty(); + let mut writer = buffer.writer(); + let apld: Payload = a.into(); + let bpld: Payload = b.into(); + + // SAFETY: we are serializing slices on a ZBuf, so serialization will never + // fail unless we run out of memory. In that case, Rust memory allocator + // will panic before the serializer has any chance to fail. + unsafe { + codec.write(&mut writer, &apld.0).unwrap_unchecked(); + codec.write(&mut writer, &bpld.0).unwrap_unchecked(); + } + + Payload::new(buffer) + } +} + +impl<'a, A, B> Deserialize<'a, (A, B)> for ZSerde +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, +{ + type Error = ZError; + + fn deserialize(self, payload: &'a Payload) -> Result<(A, B), Self::Error> { + let codec = Zenoh080::new(); + let mut reader = payload.0.reader(); + + let abuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; + let apld = Payload::new(abuf); + + let bbuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; + let bpld = Payload::new(bbuf); + + let a = A::try_from(apld).map_err(|e| zerror!("{:?}", e))?; + let b = B::try_from(bpld).map_err(|e| zerror!("{:?}", e))?; + Ok((a, b)) } } +// Iterator +// impl Serialize for ZSerde +// where +// I: Iterator, +// T: Into, +// { +// type Output = Payload; + +// fn serialize(self, iter: I) -> Self::Output { +// let codec = Zenoh080::new(); +// let mut buffer: ZBuf = ZBuf::empty(); +// let mut writer = buffer.writer(); +// for t in iter { +// let tpld: Payload = t.into(); +// // SAFETY: we are serializing slices on a ZBuf, so serialization will never +// // fail unless we run out of memory. In that case, Rust memory allocator +// // will panic before the serializer has any chance to fail. 
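// Illustrative sketch (hypothetical, not part of this patch): a user-level round trip through
// the conversions defined above, in the style of the `serialize_deserialize!` tests further
// below. `Payload::serialize`/`Payload::deserialize` are the helpers those tests call; the
// concrete values are arbitrary.
fn payload_round_trip_sketch() {
    use zenoh::prelude::*;

    // Integers are written as little-endian bytes with trailing zeroes trimmed.
    let p = Payload::serialize(42u32);
    assert_eq!(p.deserialize::<u32>().unwrap(), 42u32);

    // Strings and Vec<u8> are stored as their raw bytes.
    let p = Payload::serialize(String::from("hello"));
    assert_eq!(p.deserialize::<String>().unwrap(), "hello");

    // The generic (A, B) implementation writes each element as a self-delimited buffer,
    // so any pair of independently convertible values round-trips as well.
    let p = Payload::serialize((0usize, String::from("a")));
    let (i, s): (usize, String) = p.deserialize().unwrap();
    assert_eq!((i, s), (0usize, String::from("a")));
}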
+// unsafe { +// codec.write(&mut writer, &tpld.0).unwrap_unchecked(); +// } +// } + +// Payload::new(buffer) +// } +// } + // For convenience to always convert a Value the examples #[derive(Debug, Clone, PartialEq, Eq)] pub enum StringOrBase64 { @@ -619,14 +894,18 @@ mod tests { ($t:ty, $in:expr) => { let i = $in; let t = i.clone(); + println!("Serialize:\t{:?}", t); let v = Payload::serialize(t); + println!("Deserialize:\t{:?}", v); let o: $t = v.deserialize().unwrap(); - assert_eq!(i, o) + assert_eq!(i, o); + println!(""); }; } let mut rng = rand::thread_rng(); + // unsigned integer serialize_deserialize!(u8, u8::MIN); serialize_deserialize!(u16, u16::MIN); serialize_deserialize!(u32, u32::MIN); @@ -647,6 +926,7 @@ mod tests { serialize_deserialize!(usize, rng.gen::()); } + // signed integer serialize_deserialize!(i8, i8::MIN); serialize_deserialize!(i16, i16::MIN); serialize_deserialize!(i32, i32::MIN); @@ -667,6 +947,7 @@ mod tests { serialize_deserialize!(isize, rng.gen::()); } + // float serialize_deserialize!(f32, f32::MIN); serialize_deserialize!(f64, f64::MIN); @@ -678,13 +959,747 @@ mod tests { serialize_deserialize!(f64, rng.gen::()); } + // String serialize_deserialize!(String, ""); serialize_deserialize!(String, String::from("abcdefghijklmnopqrstuvwxyz")); + // Vec serialize_deserialize!(Vec, vec![0u8; 0]); serialize_deserialize!(Vec, vec![0u8; 64]); + // ZBuf serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 0])); serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 64])); + + // Tuple + serialize_deserialize!((usize, usize), (0, 1)); + serialize_deserialize!((usize, String), (0, String::from("a"))); + serialize_deserialize!((String, String), (String::from("a"), String::from("b"))); + + // Iterator + // let mut hm = Vec::new(); + // hm.push(0); + // hm.push(1); + // Payload::serialize(hm.iter()); + + // let mut hm = HashMap::new(); + // hm.insert(0, 0); + // hm.insert(1, 1); + // Payload::serialize(hm.iter().map(|(k, v)| (k, v))); + // for (k, v) in sample.payload().iter::<(String, serde_json::Value)>() {} } } + +// macro_rules! impl_iterator_inner { +// ($iter:expr) => {{ +// let codec = Zenoh080::new(); +// let mut buffer: ZBuf = ZBuf::empty(); +// let mut writer = buffer.writer(); +// for t in $iter { +// let tpld = ZSerde.serialize(t); +// // SAFETY: we are serializing slices on a ZBuf, so serialization will never +// // fail unless we run out of memory. In that case, Rust memory allocator +// // will panic before the serializer has any chance to fail. +// unsafe { +// codec.write(&mut writer, &tpld.0).unwrap_unchecked(); +// } +// } + +// Payload::new(buffer) +// }}; +// } + +// impl<'a> Serialize> for ZSerde { +// type Output = Payload; + +// fn serialize(self, iter: std::slice::Iter<'_, i32>) -> Self::Output { +// impl_iterator_inner!(iter) +// } +// } + +// impl<'a> Serialize> for ZSerde { +// type Output = Payload; + +// fn serialize(self, iter: std::slice::IterMut<'_, i32>) -> Self::Output { +// impl_iterator_inner!(iter) +// } +// } + +// impl Serialize<&mut dyn Iterator> for ZSerde { +// type Output = Payload; + +// fn serialize(self, iter: &mut dyn Iterator) -> Self::Output { +// let codec = Zenoh080::new(); +// let mut buffer: ZBuf = ZBuf::empty(); +// let mut writer = buffer.writer(); +// for t in iter { +// let tpld = ZSerde.serialize(t); +// // SAFETY: we are serializing slices on a ZBuf, so serialization will never +// // fail unless we run out of memory. In that case, Rust memory allocator +// // will panic before the serializer has any chance to fail. 
+// unsafe { +// codec.write(&mut writer, &tpld.0).unwrap_unchecked(); +// } +// } + +// Payload::new(buffer) +// } +// } + +// impl Serialize<(A, B)> for ZSerde +// where +// ZSerde: Serialize, +// ZSerde: Serialize, +// { +// type Output = Payload; + +// fn serialize(self, t: (A, B)) -> Self::Output { +// let (a, b) = t; + +// let codec = Zenoh080::new(); +// let mut buffer: ZBuf = ZBuf::empty(); +// let mut writer = buffer.writer(); +// let apld = Payload::serialize::(a); +// let bpld = Payload::serialize::(b); + +// // SAFETY: we are serializing slices on a ZBuf, so serialization will never +// // fail unless we run out of memory. In that case, Rust memory allocator +// // will panic before the serializer has any chance to fail. +// unsafe { +// codec.write(&mut writer, &apld.0).unwrap_unchecked(); +// codec.write(&mut writer, &bpld.0).unwrap_unchecked(); +// } + +// Payload::new(buffer) +// } +// } + +// impl<'a, A, B> Deserialize<'a, (A, B)> for ZSerde +// where +// A: TryFrom, +// ZSerde: Deserialize<'a, A>, +// >::Error: Debug, +// B: TryFrom, +// ZSerde: Deserialize<'a, B>, +// >::Error: Debug, +// { +// type Error = ZError; + +// fn deserialize(self, payload: &'a Payload) -> Result<(A, B), Self::Error> { +// let codec = Zenoh080::new(); +// let mut reader = payload.0.reader(); + +// let abuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; +// let apld = Payload::new(abuf); + +// let bbuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; +// let bpld = Payload::new(bbuf); + +// let a = A::try_from(apld).map_err(|e| zerror!("{:?}", e))?; +// let b = B::try_from(bpld).map_err(|e| zerror!("{:?}", e))?; +// Ok((a, b)) +// } +// } + +// impl Serialize<&mut dyn Iterator> for ZSerde +// where +// ZSerde: Serialize, +// { +// type Output = Payload; + +// fn serialize(self, iter: &mut dyn Iterator) -> Self::Output { +// let codec = Zenoh080::new(); +// let mut buffer: ZBuf = ZBuf::empty(); +// let mut writer = buffer.writer(); +// for t in iter { +// let tpld = ZSerde.serialize(t); +// // SAFETY: we are serializing slices on a ZBuf, so serialization will never +// // fail unless we run out of memory. In that case, Rust memory allocator +// // will panic before the serializer has any chance to fail. +// unsafe { +// codec.write(&mut writer, &tpld.0).unwrap_unchecked(); +// } +// } + +// Payload::new(buffer) +// } +// } + +// Iterator +// macro_rules! impl_iterator_serialize { +// ($a:ty) => { +// impl Serialize<&mut dyn Iterator> for ZSerde +// { +// type Output = Payload; + +// fn serialize(self, iter: &mut dyn Iterator) -> Self::Output { +// let codec = Zenoh080::new(); +// let mut buffer: ZBuf = ZBuf::empty(); +// let mut writer = buffer.writer(); +// for t in iter { +// let tpld = ZSerde.serialize(t); +// // SAFETY: we are serializing slices on a ZBuf, so serialization will never +// // fail unless we run out of memory. In that case, Rust memory allocator +// // will panic before the serializer has any chance to fail. +// unsafe { +// codec.write(&mut writer, &tpld.0).unwrap_unchecked(); +// } +// } + +// Payload::new(buffer) +// } +// } +// }; +// } + +// Tuples +// macro_rules! 
impl_tuple_serialize { +// ($a:ty, $b:ty) => { +// impl Serialize<($a, $b)> for ZSerde +// { +// type Output = Payload; + +// fn serialize(self, t: ($a, $b)) -> Self::Output { +// let (a, b) = t; + +// let codec = Zenoh080::new(); +// let mut buffer: ZBuf = ZBuf::empty(); +// let mut writer = buffer.writer(); +// let apld = Payload::serialize::<$a>(a); +// let bpld = Payload::serialize::<$b>(b); + +// // SAFETY: we are serializing slices on a ZBuf, so serialization will never +// // fail unless we run out of memory. In that case, Rust memory allocator +// // will panic before the serializer has any chance to fail. +// unsafe { +// codec.write(&mut writer, &apld.0).unwrap_unchecked(); +// codec.write(&mut writer, &bpld.0).unwrap_unchecked(); +// } + +// Payload::new(buffer) +// } +// } +// } + +// } + +// macro_rules! impl_tuple_deserialize { +// ($a:ty, $b:ty) => { +// impl<'a> Deserialize<'a, ($a, $b)> for ZSerde { +// type Error = ZError; + +// fn deserialize(self, payload: &'a Payload) -> Result<($a, $b), Self::Error> { +// let codec = Zenoh080::new(); +// let mut reader = payload.0.reader(); + +// let abuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; +// let apld = Payload::new(abuf); + +// let bbuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; +// let bpld = Payload::new(bbuf); + +// let a = apld.deserialize::<$a>().map_err(|e| zerror!("{:?}", e))?; +// let b = bpld.deserialize::<$b>().map_err(|e| zerror!("{:?}", e))?; +// Ok((a, b)) +// } +// } +// }; +// } + +// impl_tuple_serialize!(u8, u8); +// impl_tuple_deserialize!(u8, u8); +// impl_tuple_serialize!(u8, u16); +// impl_tuple_deserialize!(u8, u16); +// impl_tuple_serialize!(u8, u32); +// impl_tuple_deserialize!(u8, u32); +// impl_tuple_serialize!(u8, u64); +// impl_tuple_deserialize!(u8, u64); +// impl_tuple_serialize!(u8, usize); +// impl_tuple_deserialize!(u8, usize); +// impl_tuple_serialize!(u8, i8); +// impl_tuple_deserialize!(u8, i8); +// impl_tuple_serialize!(u8, i16); +// impl_tuple_deserialize!(u8, i16); +// impl_tuple_serialize!(u8, i32); +// impl_tuple_deserialize!(u8, i32); +// impl_tuple_serialize!(u8, isize); +// impl_tuple_deserialize!(u8, isize); +// impl_tuple_serialize!(u8, f32); +// impl_tuple_deserialize!(u8, f32); +// impl_tuple_serialize!(u8, f64); +// impl_tuple_deserialize!(u8, f64); +// impl_tuple_serialize!(u8, bool); +// impl_tuple_deserialize!(u8, bool); +// impl_tuple_serialize!(u8, ZBuf); +// impl_tuple_deserialize!(u8, ZBuf); +// impl_tuple_serialize!(u8, Vec); +// impl_tuple_deserialize!(u8, Vec); +// impl_tuple_serialize!(u8, String); +// impl_tuple_deserialize!(u8, String); +// impl_tuple_serialize!(u8, &[u8]); +// impl_tuple_serialize!(u16, u8); +// impl_tuple_deserialize!(u16, u8); +// impl_tuple_serialize!(u16, u16); +// impl_tuple_deserialize!(u16, u16); +// impl_tuple_serialize!(u16, u32); +// impl_tuple_deserialize!(u16, u32); +// impl_tuple_serialize!(u16, u64); +// impl_tuple_deserialize!(u16, u64); +// impl_tuple_serialize!(u16, usize); +// impl_tuple_deserialize!(u16, usize); +// impl_tuple_serialize!(u16, i8); +// impl_tuple_deserialize!(u16, i8); +// impl_tuple_serialize!(u16, i16); +// impl_tuple_deserialize!(u16, i16); +// impl_tuple_serialize!(u16, i32); +// impl_tuple_deserialize!(u16, i32); +// impl_tuple_serialize!(u16, isize); +// impl_tuple_deserialize!(u16, isize); +// impl_tuple_serialize!(u16, f32); +// impl_tuple_deserialize!(u16, f32); +// impl_tuple_serialize!(u16, f64); +// impl_tuple_deserialize!(u16, f64); +// 
impl_tuple_serialize!(u16, bool); +// impl_tuple_deserialize!(u16, bool); +// impl_tuple_serialize!(u16, ZBuf); +// impl_tuple_deserialize!(u16, ZBuf); +// impl_tuple_serialize!(u16, Vec); +// impl_tuple_deserialize!(u16, Vec); +// impl_tuple_serialize!(u16, String); +// impl_tuple_deserialize!(u16, String); +// impl_tuple_serialize!(u16, &[u8]); +// impl_tuple_serialize!(u32, u8); +// impl_tuple_deserialize!(u32, u8); +// impl_tuple_serialize!(u32, u16); +// impl_tuple_deserialize!(u32, u16); +// impl_tuple_serialize!(u32, u32); +// impl_tuple_deserialize!(u32, u32); +// impl_tuple_serialize!(u32, u64); +// impl_tuple_deserialize!(u32, u64); +// impl_tuple_serialize!(u32, usize); +// impl_tuple_deserialize!(u32, usize); +// impl_tuple_serialize!(u32, i8); +// impl_tuple_deserialize!(u32, i8); +// impl_tuple_serialize!(u32, i16); +// impl_tuple_deserialize!(u32, i16); +// impl_tuple_serialize!(u32, i32); +// impl_tuple_deserialize!(u32, i32); +// impl_tuple_serialize!(u32, isize); +// impl_tuple_deserialize!(u32, isize); +// impl_tuple_serialize!(u32, f32); +// impl_tuple_deserialize!(u32, f32); +// impl_tuple_serialize!(u32, f64); +// impl_tuple_deserialize!(u32, f64); +// impl_tuple_serialize!(u32, bool); +// impl_tuple_deserialize!(u32, bool); +// impl_tuple_serialize!(u32, ZBuf); +// impl_tuple_deserialize!(u32, ZBuf); +// impl_tuple_serialize!(u32, Vec); +// impl_tuple_deserialize!(u32, Vec); +// impl_tuple_serialize!(u32, String); +// impl_tuple_deserialize!(u32, String); +// impl_tuple_serialize!(u32, &[u8]); +// impl_tuple_serialize!(u64, u8); +// impl_tuple_deserialize!(u64, u8); +// impl_tuple_serialize!(u64, u16); +// impl_tuple_deserialize!(u64, u16); +// impl_tuple_serialize!(u64, u32); +// impl_tuple_deserialize!(u64, u32); +// impl_tuple_serialize!(u64, u64); +// impl_tuple_deserialize!(u64, u64); +// impl_tuple_serialize!(u64, usize); +// impl_tuple_deserialize!(u64, usize); +// impl_tuple_serialize!(u64, i8); +// impl_tuple_deserialize!(u64, i8); +// impl_tuple_serialize!(u64, i16); +// impl_tuple_deserialize!(u64, i16); +// impl_tuple_serialize!(u64, i32); +// impl_tuple_deserialize!(u64, i32); +// impl_tuple_serialize!(u64, isize); +// impl_tuple_deserialize!(u64, isize); +// impl_tuple_serialize!(u64, f32); +// impl_tuple_deserialize!(u64, f32); +// impl_tuple_serialize!(u64, f64); +// impl_tuple_deserialize!(u64, f64); +// impl_tuple_serialize!(u64, bool); +// impl_tuple_deserialize!(u64, bool); +// impl_tuple_serialize!(u64, ZBuf); +// impl_tuple_deserialize!(u64, ZBuf); +// impl_tuple_serialize!(u64, Vec); +// impl_tuple_deserialize!(u64, Vec); +// impl_tuple_serialize!(u64, String); +// impl_tuple_deserialize!(u64, String); +// impl_tuple_serialize!(u64, &[u8]); +// impl_tuple_serialize!(usize, u8); +// impl_tuple_deserialize!(usize, u8); +// impl_tuple_serialize!(usize, u16); +// impl_tuple_deserialize!(usize, u16); +// impl_tuple_serialize!(usize, u32); +// impl_tuple_deserialize!(usize, u32); +// impl_tuple_serialize!(usize, u64); +// impl_tuple_deserialize!(usize, u64); +// impl_tuple_serialize!(usize, usize); +// impl_tuple_deserialize!(usize, usize); +// impl_tuple_serialize!(usize, i8); +// impl_tuple_deserialize!(usize, i8); +// impl_tuple_serialize!(usize, i16); +// impl_tuple_deserialize!(usize, i16); +// impl_tuple_serialize!(usize, i32); +// impl_tuple_deserialize!(usize, i32); +// impl_tuple_serialize!(usize, isize); +// impl_tuple_deserialize!(usize, isize); +// impl_tuple_serialize!(usize, f32); +// impl_tuple_deserialize!(usize, f32); +// 
impl_tuple_serialize!(usize, f64); +// impl_tuple_deserialize!(usize, f64); +// impl_tuple_serialize!(usize, bool); +// impl_tuple_deserialize!(usize, bool); +// impl_tuple_serialize!(usize, ZBuf); +// impl_tuple_deserialize!(usize, ZBuf); +// impl_tuple_serialize!(usize, Vec); +// impl_tuple_deserialize!(usize, Vec); +// impl_tuple_serialize!(usize, String); +// impl_tuple_deserialize!(usize, String); +// impl_tuple_serialize!(usize, &[u8]); +// impl_tuple_serialize!(i8, u8); +// impl_tuple_deserialize!(i8, u8); +// impl_tuple_serialize!(i8, u16); +// impl_tuple_deserialize!(i8, u16); +// impl_tuple_serialize!(i8, u32); +// impl_tuple_deserialize!(i8, u32); +// impl_tuple_serialize!(i8, u64); +// impl_tuple_deserialize!(i8, u64); +// impl_tuple_serialize!(i8, usize); +// impl_tuple_deserialize!(i8, usize); +// impl_tuple_serialize!(i8, i8); +// impl_tuple_deserialize!(i8, i8); +// impl_tuple_serialize!(i8, i16); +// impl_tuple_deserialize!(i8, i16); +// impl_tuple_serialize!(i8, i32); +// impl_tuple_deserialize!(i8, i32); +// impl_tuple_serialize!(i8, isize); +// impl_tuple_deserialize!(i8, isize); +// impl_tuple_serialize!(i8, f32); +// impl_tuple_deserialize!(i8, f32); +// impl_tuple_serialize!(i8, f64); +// impl_tuple_deserialize!(i8, f64); +// impl_tuple_serialize!(i8, bool); +// impl_tuple_deserialize!(i8, bool); +// impl_tuple_serialize!(i8, ZBuf); +// impl_tuple_deserialize!(i8, ZBuf); +// impl_tuple_serialize!(i8, Vec); +// impl_tuple_deserialize!(i8, Vec); +// impl_tuple_serialize!(i8, String); +// impl_tuple_deserialize!(i8, String); +// impl_tuple_serialize!(i8, &[u8]); +// impl_tuple_serialize!(i16, u8); +// impl_tuple_deserialize!(i16, u8); +// impl_tuple_serialize!(i16, u16); +// impl_tuple_deserialize!(i16, u16); +// impl_tuple_serialize!(i16, u32); +// impl_tuple_deserialize!(i16, u32); +// impl_tuple_serialize!(i16, u64); +// impl_tuple_deserialize!(i16, u64); +// impl_tuple_serialize!(i16, usize); +// impl_tuple_deserialize!(i16, usize); +// impl_tuple_serialize!(i16, i8); +// impl_tuple_deserialize!(i16, i8); +// impl_tuple_serialize!(i16, i16); +// impl_tuple_deserialize!(i16, i16); +// impl_tuple_serialize!(i16, i32); +// impl_tuple_deserialize!(i16, i32); +// impl_tuple_serialize!(i16, isize); +// impl_tuple_deserialize!(i16, isize); +// impl_tuple_serialize!(i16, f32); +// impl_tuple_deserialize!(i16, f32); +// impl_tuple_serialize!(i16, f64); +// impl_tuple_deserialize!(i16, f64); +// impl_tuple_serialize!(i16, bool); +// impl_tuple_deserialize!(i16, bool); +// impl_tuple_serialize!(i16, ZBuf); +// impl_tuple_deserialize!(i16, ZBuf); +// impl_tuple_serialize!(i16, Vec); +// impl_tuple_deserialize!(i16, Vec); +// impl_tuple_serialize!(i16, String); +// impl_tuple_deserialize!(i16, String); +// impl_tuple_serialize!(i16, &[u8]); +// impl_tuple_serialize!(i32, u8); +// impl_tuple_deserialize!(i32, u8); +// impl_tuple_serialize!(i32, u16); +// impl_tuple_deserialize!(i32, u16); +// impl_tuple_serialize!(i32, u32); +// impl_tuple_deserialize!(i32, u32); +// impl_tuple_serialize!(i32, u64); +// impl_tuple_deserialize!(i32, u64); +// impl_tuple_serialize!(i32, usize); +// impl_tuple_deserialize!(i32, usize); +// impl_tuple_serialize!(i32, i8); +// impl_tuple_deserialize!(i32, i8); +// impl_tuple_serialize!(i32, i16); +// impl_tuple_deserialize!(i32, i16); +// impl_tuple_serialize!(i32, i32); +// impl_tuple_deserialize!(i32, i32); +// impl_tuple_serialize!(i32, isize); +// impl_tuple_deserialize!(i32, isize); +// impl_tuple_serialize!(i32, f32); +// 
impl_tuple_deserialize!(i32, f32); +// impl_tuple_serialize!(i32, f64); +// impl_tuple_deserialize!(i32, f64); +// impl_tuple_serialize!(i32, bool); +// impl_tuple_deserialize!(i32, bool); +// impl_tuple_serialize!(i32, ZBuf); +// impl_tuple_deserialize!(i32, ZBuf); +// impl_tuple_serialize!(i32, Vec); +// impl_tuple_deserialize!(i32, Vec); +// impl_tuple_serialize!(i32, String); +// impl_tuple_deserialize!(i32, String); +// impl_tuple_serialize!(i32, &[u8]); +// impl_tuple_serialize!(isize, u8); +// impl_tuple_deserialize!(isize, u8); +// impl_tuple_serialize!(isize, u16); +// impl_tuple_deserialize!(isize, u16); +// impl_tuple_serialize!(isize, u32); +// impl_tuple_deserialize!(isize, u32); +// impl_tuple_serialize!(isize, u64); +// impl_tuple_deserialize!(isize, u64); +// impl_tuple_serialize!(isize, usize); +// impl_tuple_deserialize!(isize, usize); +// impl_tuple_serialize!(isize, i8); +// impl_tuple_deserialize!(isize, i8); +// impl_tuple_serialize!(isize, i16); +// impl_tuple_deserialize!(isize, i16); +// impl_tuple_serialize!(isize, i32); +// impl_tuple_deserialize!(isize, i32); +// impl_tuple_serialize!(isize, isize); +// impl_tuple_deserialize!(isize, isize); +// impl_tuple_serialize!(isize, f32); +// impl_tuple_deserialize!(isize, f32); +// impl_tuple_serialize!(isize, f64); +// impl_tuple_deserialize!(isize, f64); +// impl_tuple_serialize!(isize, bool); +// impl_tuple_deserialize!(isize, bool); +// impl_tuple_serialize!(isize, ZBuf); +// impl_tuple_deserialize!(isize, ZBuf); +// impl_tuple_serialize!(isize, Vec); +// impl_tuple_deserialize!(isize, Vec); +// impl_tuple_serialize!(isize, String); +// impl_tuple_deserialize!(isize, String); +// impl_tuple_serialize!(isize, &[u8]); +// impl_tuple_serialize!(f32, u8); +// impl_tuple_deserialize!(f32, u8); +// impl_tuple_serialize!(f32, u16); +// impl_tuple_deserialize!(f32, u16); +// impl_tuple_serialize!(f32, u32); +// impl_tuple_deserialize!(f32, u32); +// impl_tuple_serialize!(f32, u64); +// impl_tuple_deserialize!(f32, u64); +// impl_tuple_serialize!(f32, usize); +// impl_tuple_deserialize!(f32, usize); +// impl_tuple_serialize!(f32, i8); +// impl_tuple_deserialize!(f32, i8); +// impl_tuple_serialize!(f32, i16); +// impl_tuple_deserialize!(f32, i16); +// impl_tuple_serialize!(f32, i32); +// impl_tuple_deserialize!(f32, i32); +// impl_tuple_serialize!(f32, isize); +// impl_tuple_deserialize!(f32, isize); +// impl_tuple_serialize!(f32, f32); +// impl_tuple_deserialize!(f32, f32); +// impl_tuple_serialize!(f32, f64); +// impl_tuple_deserialize!(f32, f64); +// impl_tuple_serialize!(f32, bool); +// impl_tuple_deserialize!(f32, bool); +// impl_tuple_serialize!(f32, ZBuf); +// impl_tuple_deserialize!(f32, ZBuf); +// impl_tuple_serialize!(f32, Vec); +// impl_tuple_deserialize!(f32, Vec); +// impl_tuple_serialize!(f32, String); +// impl_tuple_deserialize!(f32, String); +// impl_tuple_serialize!(f32, &[u8]); +// impl_tuple_serialize!(f64, u8); +// impl_tuple_deserialize!(f64, u8); +// impl_tuple_serialize!(f64, u16); +// impl_tuple_deserialize!(f64, u16); +// impl_tuple_serialize!(f64, u32); +// impl_tuple_deserialize!(f64, u32); +// impl_tuple_serialize!(f64, u64); +// impl_tuple_deserialize!(f64, u64); +// impl_tuple_serialize!(f64, usize); +// impl_tuple_deserialize!(f64, usize); +// impl_tuple_serialize!(f64, i8); +// impl_tuple_deserialize!(f64, i8); +// impl_tuple_serialize!(f64, i16); +// impl_tuple_deserialize!(f64, i16); +// impl_tuple_serialize!(f64, i32); +// impl_tuple_deserialize!(f64, i32); +// impl_tuple_serialize!(f64, 
isize); +// impl_tuple_deserialize!(f64, isize); +// impl_tuple_serialize!(f64, f32); +// impl_tuple_deserialize!(f64, f32); +// impl_tuple_serialize!(f64, f64); +// impl_tuple_deserialize!(f64, f64); +// impl_tuple_serialize!(f64, bool); +// impl_tuple_deserialize!(f64, bool); +// impl_tuple_serialize!(f64, ZBuf); +// impl_tuple_deserialize!(f64, ZBuf); +// impl_tuple_serialize!(f64, Vec); +// impl_tuple_deserialize!(f64, Vec); +// impl_tuple_serialize!(f64, String); +// impl_tuple_deserialize!(f64, String); +// impl_tuple_serialize!(f64, &[u8]); +// impl_tuple_serialize!(bool, u8); +// impl_tuple_deserialize!(bool, u8); +// impl_tuple_serialize!(bool, u16); +// impl_tuple_deserialize!(bool, u16); +// impl_tuple_serialize!(bool, u32); +// impl_tuple_deserialize!(bool, u32); +// impl_tuple_serialize!(bool, u64); +// impl_tuple_deserialize!(bool, u64); +// impl_tuple_serialize!(bool, usize); +// impl_tuple_deserialize!(bool, usize); +// impl_tuple_serialize!(bool, i8); +// impl_tuple_deserialize!(bool, i8); +// impl_tuple_serialize!(bool, i16); +// impl_tuple_deserialize!(bool, i16); +// impl_tuple_serialize!(bool, i32); +// impl_tuple_deserialize!(bool, i32); +// impl_tuple_serialize!(bool, isize); +// impl_tuple_deserialize!(bool, isize); +// impl_tuple_serialize!(bool, f32); +// impl_tuple_deserialize!(bool, f32); +// impl_tuple_serialize!(bool, f64); +// impl_tuple_deserialize!(bool, f64); +// impl_tuple_serialize!(bool, bool); +// impl_tuple_deserialize!(bool, bool); +// impl_tuple_serialize!(bool, ZBuf); +// impl_tuple_deserialize!(bool, ZBuf); +// impl_tuple_serialize!(bool, Vec); +// impl_tuple_deserialize!(bool, Vec); +// impl_tuple_serialize!(bool, String); +// impl_tuple_deserialize!(bool, String); +// impl_tuple_serialize!(bool, &[u8]); +// impl_tuple_serialize!(ZBuf, u8); +// impl_tuple_deserialize!(ZBuf, u8); +// impl_tuple_serialize!(ZBuf, u16); +// impl_tuple_deserialize!(ZBuf, u16); +// impl_tuple_serialize!(ZBuf, u32); +// impl_tuple_deserialize!(ZBuf, u32); +// impl_tuple_serialize!(ZBuf, u64); +// impl_tuple_deserialize!(ZBuf, u64); +// impl_tuple_serialize!(ZBuf, usize); +// impl_tuple_deserialize!(ZBuf, usize); +// impl_tuple_serialize!(ZBuf, i8); +// impl_tuple_deserialize!(ZBuf, i8); +// impl_tuple_serialize!(ZBuf, i16); +// impl_tuple_deserialize!(ZBuf, i16); +// impl_tuple_serialize!(ZBuf, i32); +// impl_tuple_deserialize!(ZBuf, i32); +// impl_tuple_serialize!(ZBuf, isize); +// impl_tuple_deserialize!(ZBuf, isize); +// impl_tuple_serialize!(ZBuf, f32); +// impl_tuple_deserialize!(ZBuf, f32); +// impl_tuple_serialize!(ZBuf, f64); +// impl_tuple_deserialize!(ZBuf, f64); +// impl_tuple_serialize!(ZBuf, bool); +// impl_tuple_deserialize!(ZBuf, bool); +// impl_tuple_serialize!(ZBuf, ZBuf); +// impl_tuple_deserialize!(ZBuf, ZBuf); +// impl_tuple_serialize!(ZBuf, Vec); +// impl_tuple_deserialize!(ZBuf, Vec); +// impl_tuple_serialize!(ZBuf, String); +// impl_tuple_deserialize!(ZBuf, String); +// impl_tuple_serialize!(ZBuf, &[u8]); +// impl_tuple_serialize!(Vec, u8); +// impl_tuple_deserialize!(Vec, u8); +// impl_tuple_serialize!(Vec, u16); +// impl_tuple_deserialize!(Vec, u16); +// impl_tuple_serialize!(Vec, u32); +// impl_tuple_deserialize!(Vec, u32); +// impl_tuple_serialize!(Vec, u64); +// impl_tuple_deserialize!(Vec, u64); +// impl_tuple_serialize!(Vec, usize); +// impl_tuple_deserialize!(Vec, usize); +// impl_tuple_serialize!(Vec, i8); +// impl_tuple_deserialize!(Vec, i8); +// impl_tuple_serialize!(Vec, i16); +// impl_tuple_deserialize!(Vec, i16); +// 
impl_tuple_serialize!(Vec, i32); +// impl_tuple_deserialize!(Vec, i32); +// impl_tuple_serialize!(Vec, isize); +// impl_tuple_deserialize!(Vec, isize); +// impl_tuple_serialize!(Vec, f32); +// impl_tuple_deserialize!(Vec, f32); +// impl_tuple_serialize!(Vec, f64); +// impl_tuple_deserialize!(Vec, f64); +// impl_tuple_serialize!(Vec, bool); +// impl_tuple_deserialize!(Vec, bool); +// impl_tuple_serialize!(Vec, ZBuf); +// impl_tuple_deserialize!(Vec, ZBuf); +// impl_tuple_serialize!(Vec, Vec); +// impl_tuple_deserialize!(Vec, Vec); +// impl_tuple_serialize!(Vec, String); +// impl_tuple_deserialize!(Vec, String); +// impl_tuple_serialize!(Vec, &[u8]); +// impl_tuple_serialize!(String, u8); +// impl_tuple_deserialize!(String, u8); +// impl_tuple_serialize!(String, u16); +// impl_tuple_deserialize!(String, u16); +// impl_tuple_serialize!(String, u32); +// impl_tuple_deserialize!(String, u32); +// impl_tuple_serialize!(String, u64); +// impl_tuple_deserialize!(String, u64); +// impl_tuple_serialize!(String, usize); +// impl_tuple_deserialize!(String, usize); +// impl_tuple_serialize!(String, i8); +// impl_tuple_deserialize!(String, i8); +// impl_tuple_serialize!(String, i16); +// impl_tuple_deserialize!(String, i16); +// impl_tuple_serialize!(String, i32); +// impl_tuple_deserialize!(String, i32); +// impl_tuple_serialize!(String, isize); +// impl_tuple_deserialize!(String, isize); +// impl_tuple_serialize!(String, f32); +// impl_tuple_deserialize!(String, f32); +// impl_tuple_serialize!(String, f64); +// impl_tuple_deserialize!(String, f64); +// impl_tuple_serialize!(String, bool); +// impl_tuple_deserialize!(String, bool); +// impl_tuple_serialize!(String, ZBuf); +// impl_tuple_deserialize!(String, ZBuf); +// impl_tuple_serialize!(String, Vec); +// impl_tuple_deserialize!(String, Vec); +// impl_tuple_serialize!(String, String); +// impl_tuple_deserialize!(String, String); +// impl_tuple_serialize!(String, &[u8]); +// impl_tuple_serialize!(&[u8], u8); +// impl_tuple_serialize!(&[u8], u16); +// impl_tuple_serialize!(&[u8], u32); +// impl_tuple_serialize!(&[u8], u64); +// impl_tuple_serialize!(&[u8], usize); +// impl_tuple_serialize!(&[u8], i8); +// impl_tuple_serialize!(&[u8], i16); +// impl_tuple_serialize!(&[u8], i32); +// impl_tuple_serialize!(&[u8], isize); +// impl_tuple_serialize!(&[u8], f32); +// impl_tuple_serialize!(&[u8], f64); +// impl_tuple_serialize!(&[u8], bool); +// impl_tuple_serialize!(&[u8], ZBuf); +// impl_tuple_serialize!(&[u8], Vec); +// impl_tuple_serialize!(&[u8], String); +// impl_tuple_serialize!(&[u8], &[u8]); +// impl_iterator_serialize!(u8); +// impl_iterator_serialize!(u16); +// impl_iterator_serialize!(u32); +// impl_iterator_serialize!(u64); +// impl_iterator_serialize!(usize); +// impl_iterator_serialize!(i8); +// impl_iterator_serialize!(i16); +// impl_iterator_serialize!(i32); +// impl_iterator_serialize!(isize); +// impl_iterator_serialize!(f32); +// impl_iterator_serialize!(f64); +// impl_iterator_serialize!(bool); +// impl_iterator_serialize!(ZBuf); +// impl_iterator_serialize!(Vec); +// impl_iterator_serialize!(String); +// impl_iterator_serialize!(&[u8]); diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 58589bfe8f..ae9119ac8a 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -19,6 +19,7 @@ use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; use crate::sample::QoS; +#[cfg(feature = "unstable")] use crate::sample::SourceInfo; use crate::Id; use crate::SessionRef; 
diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 9bc6c9c331..0c1c193568 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -57,12 +57,7 @@ use zenoh_collections::SingleOrVec; use zenoh_config::unwrap_or_default; use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; #[cfg(feature = "unstable")] -use zenoh_protocol::network::declare::SubscriberId; -use zenoh_protocol::network::AtomicRequestId; -use zenoh_protocol::network::RequestId; -use zenoh_protocol::zenoh::reply::ReplyBody; -use zenoh_protocol::zenoh::Del; -use zenoh_protocol::zenoh::Put; +use zenoh_protocol::network::{declare::SubscriberId, ext}; use zenoh_protocol::{ core::{ key_expr::{keyexpr, OwnedKeyExpr}, @@ -74,13 +69,13 @@ use zenoh_protocol::{ subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, DeclareMode, DeclareQueryable, DeclareSubscriber, UndeclareQueryable, UndeclareSubscriber, }, - ext, request::{self, ext::TargetType, Request}, - Mapping, Push, Response, ResponseFinal, + AtomicRequestId, Mapping, Push, RequestId, Response, ResponseFinal, }, zenoh::{ query::{self, ext::QueryBodyType, Consolidation}, - PushBody, RequestBody, ResponseBody, + reply::ReplyBody, + Del, PushBody, Put, RequestBody, ResponseBody, }, }; use zenoh_result::ZResult; diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index 60a31a6577..47d41ebb1f 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -202,9 +202,6 @@ pub struct SubscriberBuilder<'a, 'b, Handler> { #[cfg(not(feature = "unstable"))] pub(crate) reliability: Reliability, - #[cfg(not(feature = "unstable"))] - pub(crate) mode: Mode, - #[cfg(feature = "unstable")] pub origin: Locality, #[cfg(not(feature = "unstable"))] From 3dea601356c7fdb08f14c7ce6c94e732db5b1836 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 8 Apr 2024 18:33:22 +0200 Subject: [PATCH 177/357] Payload iter impl --- zenoh/src/payload.rs | 821 ++++--------------------------------------- 1 file changed, 67 insertions(+), 754 deletions(-) diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index db3126d93d..ed8c1b98c3 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -80,10 +80,11 @@ impl Payload { } /// Get a [`PayloadReader`] implementing [`std::io::Read`] trait. - pub fn iter(&self) -> PayloadIterator<'_, T> + pub fn iter<'a, T>(&'a self) -> PayloadIterator<'a, T> where T: TryFrom, - ZSerde: for<'b> Deserialize<'b, T, Error = ZDeserializeError>, + ZSerde: Deserialize<'a, T>, + >::Error: Debug, { PayloadIterator { reader: self.0.reader(), @@ -144,7 +145,8 @@ where impl<'a, T> Iterator for PayloadIterator<'a, T> where - ZSerde: for<'b> Deserialize<'b, T, Error = ZDeserializeError>, + ZSerde: for<'b> Deserialize<'b, T>, + >::Error: Debug, { type Item = T; @@ -164,6 +166,28 @@ where } } +impl FromIterator for Payload +where + ZSerde: Serialize, +{ + fn from_iter>(iter: T) -> Self { + let codec = Zenoh080::new(); + let mut buffer: ZBuf = ZBuf::empty(); + let mut writer = buffer.writer(); + for t in iter { + let tpld = ZSerde.serialize(t); + // SAFETY: we are serializing slices on a ZBuf, so serialization will never + // fail unless we run out of memory. In that case, Rust memory allocator + // will panic before the serializer has any chance to fail. + unsafe { + codec.write(&mut writer, &tpld.0).unwrap_unchecked(); + } + } + + Payload::new(buffer) + } +} + /// The default serializer for Zenoh payload. It supports primitives types, such as: vec, int, uint, float, string, bool. 
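// A minimal usage sketch (illustrative only, not part of the original commit): the
// `iter::<T>()` method and the `FromIterator` impl introduced just above combine into a
// serialize/iterate round trip. It mirrors the test added later in this same commit and
// assumes it lives inside this file's `tests` module, so `Payload` is already in scope.
#[test]
fn payload_from_iter_round_trip() {
    // Each item is serialized with ZSerde and appended to the underlying ZBuf.
    let v: [usize; 5] = [0, 1, 2, 3, 4];
    let p = Payload::from_iter(v.iter());
    // Iteration deserializes lazily: one `usize` is decoded per call to `next()`.
    for (i, t) in p.iter::<usize>().enumerate() {
        assert_eq!(i, t);
    }
}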
/// It also supports common Rust serde values. #[derive(Clone, Copy, Debug)] @@ -786,6 +810,16 @@ where } } +impl From<(A, B)> for Payload +where + A: Into, + B: Into, +{ + fn from(value: (A, B)) -> Self { + ZSerde.serialize(value) + } +} + impl<'a, A, B> Deserialize<'a, (A, B)> for ZSerde where A: TryFrom, @@ -811,31 +845,19 @@ where } } -// Iterator -// impl Serialize for ZSerde -// where -// I: Iterator, -// T: Into, -// { -// type Output = Payload; - -// fn serialize(self, iter: I) -> Self::Output { -// let codec = Zenoh080::new(); -// let mut buffer: ZBuf = ZBuf::empty(); -// let mut writer = buffer.writer(); -// for t in iter { -// let tpld: Payload = t.into(); -// // SAFETY: we are serializing slices on a ZBuf, so serialization will never -// // fail unless we run out of memory. In that case, Rust memory allocator -// // will panic before the serializer has any chance to fail. -// unsafe { -// codec.write(&mut writer, &tpld.0).unwrap_unchecked(); -// } -// } - -// Payload::new(buffer) -// } -// } +impl TryFrom for (A, B) +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, +{ + type Error = ZError; + + fn try_from(value: Payload) -> Result { + ZSerde.deserialize(&value) + } +} // For convenience to always convert a Value the examples #[derive(Debug, Clone, PartialEq, Eq)] @@ -977,729 +999,20 @@ mod tests { serialize_deserialize!((String, String), (String::from("a"), String::from("b"))); // Iterator - // let mut hm = Vec::new(); - // hm.push(0); - // hm.push(1); - // Payload::serialize(hm.iter()); - - // let mut hm = HashMap::new(); - // hm.insert(0, 0); - // hm.insert(1, 1); - // Payload::serialize(hm.iter().map(|(k, v)| (k, v))); - // for (k, v) in sample.payload().iter::<(String, serde_json::Value)>() {} - } -} - -// macro_rules! impl_iterator_inner { -// ($iter:expr) => {{ -// let codec = Zenoh080::new(); -// let mut buffer: ZBuf = ZBuf::empty(); -// let mut writer = buffer.writer(); -// for t in $iter { -// let tpld = ZSerde.serialize(t); -// // SAFETY: we are serializing slices on a ZBuf, so serialization will never -// // fail unless we run out of memory. In that case, Rust memory allocator -// // will panic before the serializer has any chance to fail. -// unsafe { -// codec.write(&mut writer, &tpld.0).unwrap_unchecked(); -// } -// } - -// Payload::new(buffer) -// }}; -// } - -// impl<'a> Serialize> for ZSerde { -// type Output = Payload; - -// fn serialize(self, iter: std::slice::Iter<'_, i32>) -> Self::Output { -// impl_iterator_inner!(iter) -// } -// } - -// impl<'a> Serialize> for ZSerde { -// type Output = Payload; - -// fn serialize(self, iter: std::slice::IterMut<'_, i32>) -> Self::Output { -// impl_iterator_inner!(iter) -// } -// } - -// impl Serialize<&mut dyn Iterator> for ZSerde { -// type Output = Payload; - -// fn serialize(self, iter: &mut dyn Iterator) -> Self::Output { -// let codec = Zenoh080::new(); -// let mut buffer: ZBuf = ZBuf::empty(); -// let mut writer = buffer.writer(); -// for t in iter { -// let tpld = ZSerde.serialize(t); -// // SAFETY: we are serializing slices on a ZBuf, so serialization will never -// // fail unless we run out of memory. In that case, Rust memory allocator -// // will panic before the serializer has any chance to fail. 
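// A minimal usage sketch (illustrative only, not part of the original commit): the generic
// `Serialize<(A, B)>` / `Deserialize<'a, (A, B)>` impls above let a two-element tuple be
// round-tripped through a single `Payload`. This mirrors the `(String, String)` case
// exercised by `serialize_deserialize!` in this file's tests and assumes the same `tests`
// module context; behaviour for other element types is assumed to follow the same pattern.
#[test]
fn payload_tuple_round_trip() {
    let t = (String::from("abc"), String::from("def"));
    // Each element is serialized to its own sub-buffer and written into the payload in order.
    let p = Payload::serialize(t.clone());
    // Deserialization reads the two sub-payloads back in the same order.
    let o = p.deserialize::<(String, String)>().unwrap();
    assert_eq!(t, o);
}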
-// unsafe { -// codec.write(&mut writer, &tpld.0).unwrap_unchecked(); -// } -// } - -// Payload::new(buffer) -// } -// } - -// impl Serialize<(A, B)> for ZSerde -// where -// ZSerde: Serialize, -// ZSerde: Serialize, -// { -// type Output = Payload; - -// fn serialize(self, t: (A, B)) -> Self::Output { -// let (a, b) = t; - -// let codec = Zenoh080::new(); -// let mut buffer: ZBuf = ZBuf::empty(); -// let mut writer = buffer.writer(); -// let apld = Payload::serialize::(a); -// let bpld = Payload::serialize::(b); - -// // SAFETY: we are serializing slices on a ZBuf, so serialization will never -// // fail unless we run out of memory. In that case, Rust memory allocator -// // will panic before the serializer has any chance to fail. -// unsafe { -// codec.write(&mut writer, &apld.0).unwrap_unchecked(); -// codec.write(&mut writer, &bpld.0).unwrap_unchecked(); -// } - -// Payload::new(buffer) -// } -// } - -// impl<'a, A, B> Deserialize<'a, (A, B)> for ZSerde -// where -// A: TryFrom, -// ZSerde: Deserialize<'a, A>, -// >::Error: Debug, -// B: TryFrom, -// ZSerde: Deserialize<'a, B>, -// >::Error: Debug, -// { -// type Error = ZError; - -// fn deserialize(self, payload: &'a Payload) -> Result<(A, B), Self::Error> { -// let codec = Zenoh080::new(); -// let mut reader = payload.0.reader(); - -// let abuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; -// let apld = Payload::new(abuf); - -// let bbuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; -// let bpld = Payload::new(bbuf); - -// let a = A::try_from(apld).map_err(|e| zerror!("{:?}", e))?; -// let b = B::try_from(bpld).map_err(|e| zerror!("{:?}", e))?; -// Ok((a, b)) -// } -// } - -// impl Serialize<&mut dyn Iterator> for ZSerde -// where -// ZSerde: Serialize, -// { -// type Output = Payload; - -// fn serialize(self, iter: &mut dyn Iterator) -> Self::Output { -// let codec = Zenoh080::new(); -// let mut buffer: ZBuf = ZBuf::empty(); -// let mut writer = buffer.writer(); -// for t in iter { -// let tpld = ZSerde.serialize(t); -// // SAFETY: we are serializing slices on a ZBuf, so serialization will never -// // fail unless we run out of memory. In that case, Rust memory allocator -// // will panic before the serializer has any chance to fail. -// unsafe { -// codec.write(&mut writer, &tpld.0).unwrap_unchecked(); -// } -// } - -// Payload::new(buffer) -// } -// } - -// Iterator -// macro_rules! impl_iterator_serialize { -// ($a:ty) => { -// impl Serialize<&mut dyn Iterator> for ZSerde -// { -// type Output = Payload; - -// fn serialize(self, iter: &mut dyn Iterator) -> Self::Output { -// let codec = Zenoh080::new(); -// let mut buffer: ZBuf = ZBuf::empty(); -// let mut writer = buffer.writer(); -// for t in iter { -// let tpld = ZSerde.serialize(t); -// // SAFETY: we are serializing slices on a ZBuf, so serialization will never -// // fail unless we run out of memory. In that case, Rust memory allocator -// // will panic before the serializer has any chance to fail. -// unsafe { -// codec.write(&mut writer, &tpld.0).unwrap_unchecked(); -// } -// } - -// Payload::new(buffer) -// } -// } -// }; -// } - -// Tuples -// macro_rules! 
impl_tuple_serialize { -// ($a:ty, $b:ty) => { -// impl Serialize<($a, $b)> for ZSerde -// { -// type Output = Payload; - -// fn serialize(self, t: ($a, $b)) -> Self::Output { -// let (a, b) = t; - -// let codec = Zenoh080::new(); -// let mut buffer: ZBuf = ZBuf::empty(); -// let mut writer = buffer.writer(); -// let apld = Payload::serialize::<$a>(a); -// let bpld = Payload::serialize::<$b>(b); - -// // SAFETY: we are serializing slices on a ZBuf, so serialization will never -// // fail unless we run out of memory. In that case, Rust memory allocator -// // will panic before the serializer has any chance to fail. -// unsafe { -// codec.write(&mut writer, &apld.0).unwrap_unchecked(); -// codec.write(&mut writer, &bpld.0).unwrap_unchecked(); -// } - -// Payload::new(buffer) -// } -// } -// } - -// } - -// macro_rules! impl_tuple_deserialize { -// ($a:ty, $b:ty) => { -// impl<'a> Deserialize<'a, ($a, $b)> for ZSerde { -// type Error = ZError; - -// fn deserialize(self, payload: &'a Payload) -> Result<($a, $b), Self::Error> { -// let codec = Zenoh080::new(); -// let mut reader = payload.0.reader(); - -// let abuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; -// let apld = Payload::new(abuf); - -// let bbuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; -// let bpld = Payload::new(bbuf); - -// let a = apld.deserialize::<$a>().map_err(|e| zerror!("{:?}", e))?; -// let b = bpld.deserialize::<$b>().map_err(|e| zerror!("{:?}", e))?; -// Ok((a, b)) -// } -// } -// }; -// } - -// impl_tuple_serialize!(u8, u8); -// impl_tuple_deserialize!(u8, u8); -// impl_tuple_serialize!(u8, u16); -// impl_tuple_deserialize!(u8, u16); -// impl_tuple_serialize!(u8, u32); -// impl_tuple_deserialize!(u8, u32); -// impl_tuple_serialize!(u8, u64); -// impl_tuple_deserialize!(u8, u64); -// impl_tuple_serialize!(u8, usize); -// impl_tuple_deserialize!(u8, usize); -// impl_tuple_serialize!(u8, i8); -// impl_tuple_deserialize!(u8, i8); -// impl_tuple_serialize!(u8, i16); -// impl_tuple_deserialize!(u8, i16); -// impl_tuple_serialize!(u8, i32); -// impl_tuple_deserialize!(u8, i32); -// impl_tuple_serialize!(u8, isize); -// impl_tuple_deserialize!(u8, isize); -// impl_tuple_serialize!(u8, f32); -// impl_tuple_deserialize!(u8, f32); -// impl_tuple_serialize!(u8, f64); -// impl_tuple_deserialize!(u8, f64); -// impl_tuple_serialize!(u8, bool); -// impl_tuple_deserialize!(u8, bool); -// impl_tuple_serialize!(u8, ZBuf); -// impl_tuple_deserialize!(u8, ZBuf); -// impl_tuple_serialize!(u8, Vec); -// impl_tuple_deserialize!(u8, Vec); -// impl_tuple_serialize!(u8, String); -// impl_tuple_deserialize!(u8, String); -// impl_tuple_serialize!(u8, &[u8]); -// impl_tuple_serialize!(u16, u8); -// impl_tuple_deserialize!(u16, u8); -// impl_tuple_serialize!(u16, u16); -// impl_tuple_deserialize!(u16, u16); -// impl_tuple_serialize!(u16, u32); -// impl_tuple_deserialize!(u16, u32); -// impl_tuple_serialize!(u16, u64); -// impl_tuple_deserialize!(u16, u64); -// impl_tuple_serialize!(u16, usize); -// impl_tuple_deserialize!(u16, usize); -// impl_tuple_serialize!(u16, i8); -// impl_tuple_deserialize!(u16, i8); -// impl_tuple_serialize!(u16, i16); -// impl_tuple_deserialize!(u16, i16); -// impl_tuple_serialize!(u16, i32); -// impl_tuple_deserialize!(u16, i32); -// impl_tuple_serialize!(u16, isize); -// impl_tuple_deserialize!(u16, isize); -// impl_tuple_serialize!(u16, f32); -// impl_tuple_deserialize!(u16, f32); -// impl_tuple_serialize!(u16, f64); -// impl_tuple_deserialize!(u16, f64); -// 
impl_tuple_serialize!(u16, bool); -// impl_tuple_deserialize!(u16, bool); -// impl_tuple_serialize!(u16, ZBuf); -// impl_tuple_deserialize!(u16, ZBuf); -// impl_tuple_serialize!(u16, Vec); -// impl_tuple_deserialize!(u16, Vec); -// impl_tuple_serialize!(u16, String); -// impl_tuple_deserialize!(u16, String); -// impl_tuple_serialize!(u16, &[u8]); -// impl_tuple_serialize!(u32, u8); -// impl_tuple_deserialize!(u32, u8); -// impl_tuple_serialize!(u32, u16); -// impl_tuple_deserialize!(u32, u16); -// impl_tuple_serialize!(u32, u32); -// impl_tuple_deserialize!(u32, u32); -// impl_tuple_serialize!(u32, u64); -// impl_tuple_deserialize!(u32, u64); -// impl_tuple_serialize!(u32, usize); -// impl_tuple_deserialize!(u32, usize); -// impl_tuple_serialize!(u32, i8); -// impl_tuple_deserialize!(u32, i8); -// impl_tuple_serialize!(u32, i16); -// impl_tuple_deserialize!(u32, i16); -// impl_tuple_serialize!(u32, i32); -// impl_tuple_deserialize!(u32, i32); -// impl_tuple_serialize!(u32, isize); -// impl_tuple_deserialize!(u32, isize); -// impl_tuple_serialize!(u32, f32); -// impl_tuple_deserialize!(u32, f32); -// impl_tuple_serialize!(u32, f64); -// impl_tuple_deserialize!(u32, f64); -// impl_tuple_serialize!(u32, bool); -// impl_tuple_deserialize!(u32, bool); -// impl_tuple_serialize!(u32, ZBuf); -// impl_tuple_deserialize!(u32, ZBuf); -// impl_tuple_serialize!(u32, Vec); -// impl_tuple_deserialize!(u32, Vec); -// impl_tuple_serialize!(u32, String); -// impl_tuple_deserialize!(u32, String); -// impl_tuple_serialize!(u32, &[u8]); -// impl_tuple_serialize!(u64, u8); -// impl_tuple_deserialize!(u64, u8); -// impl_tuple_serialize!(u64, u16); -// impl_tuple_deserialize!(u64, u16); -// impl_tuple_serialize!(u64, u32); -// impl_tuple_deserialize!(u64, u32); -// impl_tuple_serialize!(u64, u64); -// impl_tuple_deserialize!(u64, u64); -// impl_tuple_serialize!(u64, usize); -// impl_tuple_deserialize!(u64, usize); -// impl_tuple_serialize!(u64, i8); -// impl_tuple_deserialize!(u64, i8); -// impl_tuple_serialize!(u64, i16); -// impl_tuple_deserialize!(u64, i16); -// impl_tuple_serialize!(u64, i32); -// impl_tuple_deserialize!(u64, i32); -// impl_tuple_serialize!(u64, isize); -// impl_tuple_deserialize!(u64, isize); -// impl_tuple_serialize!(u64, f32); -// impl_tuple_deserialize!(u64, f32); -// impl_tuple_serialize!(u64, f64); -// impl_tuple_deserialize!(u64, f64); -// impl_tuple_serialize!(u64, bool); -// impl_tuple_deserialize!(u64, bool); -// impl_tuple_serialize!(u64, ZBuf); -// impl_tuple_deserialize!(u64, ZBuf); -// impl_tuple_serialize!(u64, Vec); -// impl_tuple_deserialize!(u64, Vec); -// impl_tuple_serialize!(u64, String); -// impl_tuple_deserialize!(u64, String); -// impl_tuple_serialize!(u64, &[u8]); -// impl_tuple_serialize!(usize, u8); -// impl_tuple_deserialize!(usize, u8); -// impl_tuple_serialize!(usize, u16); -// impl_tuple_deserialize!(usize, u16); -// impl_tuple_serialize!(usize, u32); -// impl_tuple_deserialize!(usize, u32); -// impl_tuple_serialize!(usize, u64); -// impl_tuple_deserialize!(usize, u64); -// impl_tuple_serialize!(usize, usize); -// impl_tuple_deserialize!(usize, usize); -// impl_tuple_serialize!(usize, i8); -// impl_tuple_deserialize!(usize, i8); -// impl_tuple_serialize!(usize, i16); -// impl_tuple_deserialize!(usize, i16); -// impl_tuple_serialize!(usize, i32); -// impl_tuple_deserialize!(usize, i32); -// impl_tuple_serialize!(usize, isize); -// impl_tuple_deserialize!(usize, isize); -// impl_tuple_serialize!(usize, f32); -// impl_tuple_deserialize!(usize, f32); -// 
impl_tuple_serialize!(usize, f64); -// impl_tuple_deserialize!(usize, f64); -// impl_tuple_serialize!(usize, bool); -// impl_tuple_deserialize!(usize, bool); -// impl_tuple_serialize!(usize, ZBuf); -// impl_tuple_deserialize!(usize, ZBuf); -// impl_tuple_serialize!(usize, Vec); -// impl_tuple_deserialize!(usize, Vec); -// impl_tuple_serialize!(usize, String); -// impl_tuple_deserialize!(usize, String); -// impl_tuple_serialize!(usize, &[u8]); -// impl_tuple_serialize!(i8, u8); -// impl_tuple_deserialize!(i8, u8); -// impl_tuple_serialize!(i8, u16); -// impl_tuple_deserialize!(i8, u16); -// impl_tuple_serialize!(i8, u32); -// impl_tuple_deserialize!(i8, u32); -// impl_tuple_serialize!(i8, u64); -// impl_tuple_deserialize!(i8, u64); -// impl_tuple_serialize!(i8, usize); -// impl_tuple_deserialize!(i8, usize); -// impl_tuple_serialize!(i8, i8); -// impl_tuple_deserialize!(i8, i8); -// impl_tuple_serialize!(i8, i16); -// impl_tuple_deserialize!(i8, i16); -// impl_tuple_serialize!(i8, i32); -// impl_tuple_deserialize!(i8, i32); -// impl_tuple_serialize!(i8, isize); -// impl_tuple_deserialize!(i8, isize); -// impl_tuple_serialize!(i8, f32); -// impl_tuple_deserialize!(i8, f32); -// impl_tuple_serialize!(i8, f64); -// impl_tuple_deserialize!(i8, f64); -// impl_tuple_serialize!(i8, bool); -// impl_tuple_deserialize!(i8, bool); -// impl_tuple_serialize!(i8, ZBuf); -// impl_tuple_deserialize!(i8, ZBuf); -// impl_tuple_serialize!(i8, Vec); -// impl_tuple_deserialize!(i8, Vec); -// impl_tuple_serialize!(i8, String); -// impl_tuple_deserialize!(i8, String); -// impl_tuple_serialize!(i8, &[u8]); -// impl_tuple_serialize!(i16, u8); -// impl_tuple_deserialize!(i16, u8); -// impl_tuple_serialize!(i16, u16); -// impl_tuple_deserialize!(i16, u16); -// impl_tuple_serialize!(i16, u32); -// impl_tuple_deserialize!(i16, u32); -// impl_tuple_serialize!(i16, u64); -// impl_tuple_deserialize!(i16, u64); -// impl_tuple_serialize!(i16, usize); -// impl_tuple_deserialize!(i16, usize); -// impl_tuple_serialize!(i16, i8); -// impl_tuple_deserialize!(i16, i8); -// impl_tuple_serialize!(i16, i16); -// impl_tuple_deserialize!(i16, i16); -// impl_tuple_serialize!(i16, i32); -// impl_tuple_deserialize!(i16, i32); -// impl_tuple_serialize!(i16, isize); -// impl_tuple_deserialize!(i16, isize); -// impl_tuple_serialize!(i16, f32); -// impl_tuple_deserialize!(i16, f32); -// impl_tuple_serialize!(i16, f64); -// impl_tuple_deserialize!(i16, f64); -// impl_tuple_serialize!(i16, bool); -// impl_tuple_deserialize!(i16, bool); -// impl_tuple_serialize!(i16, ZBuf); -// impl_tuple_deserialize!(i16, ZBuf); -// impl_tuple_serialize!(i16, Vec); -// impl_tuple_deserialize!(i16, Vec); -// impl_tuple_serialize!(i16, String); -// impl_tuple_deserialize!(i16, String); -// impl_tuple_serialize!(i16, &[u8]); -// impl_tuple_serialize!(i32, u8); -// impl_tuple_deserialize!(i32, u8); -// impl_tuple_serialize!(i32, u16); -// impl_tuple_deserialize!(i32, u16); -// impl_tuple_serialize!(i32, u32); -// impl_tuple_deserialize!(i32, u32); -// impl_tuple_serialize!(i32, u64); -// impl_tuple_deserialize!(i32, u64); -// impl_tuple_serialize!(i32, usize); -// impl_tuple_deserialize!(i32, usize); -// impl_tuple_serialize!(i32, i8); -// impl_tuple_deserialize!(i32, i8); -// impl_tuple_serialize!(i32, i16); -// impl_tuple_deserialize!(i32, i16); -// impl_tuple_serialize!(i32, i32); -// impl_tuple_deserialize!(i32, i32); -// impl_tuple_serialize!(i32, isize); -// impl_tuple_deserialize!(i32, isize); -// impl_tuple_serialize!(i32, f32); -// 
impl_tuple_deserialize!(i32, f32); -// impl_tuple_serialize!(i32, f64); -// impl_tuple_deserialize!(i32, f64); -// impl_tuple_serialize!(i32, bool); -// impl_tuple_deserialize!(i32, bool); -// impl_tuple_serialize!(i32, ZBuf); -// impl_tuple_deserialize!(i32, ZBuf); -// impl_tuple_serialize!(i32, Vec); -// impl_tuple_deserialize!(i32, Vec); -// impl_tuple_serialize!(i32, String); -// impl_tuple_deserialize!(i32, String); -// impl_tuple_serialize!(i32, &[u8]); -// impl_tuple_serialize!(isize, u8); -// impl_tuple_deserialize!(isize, u8); -// impl_tuple_serialize!(isize, u16); -// impl_tuple_deserialize!(isize, u16); -// impl_tuple_serialize!(isize, u32); -// impl_tuple_deserialize!(isize, u32); -// impl_tuple_serialize!(isize, u64); -// impl_tuple_deserialize!(isize, u64); -// impl_tuple_serialize!(isize, usize); -// impl_tuple_deserialize!(isize, usize); -// impl_tuple_serialize!(isize, i8); -// impl_tuple_deserialize!(isize, i8); -// impl_tuple_serialize!(isize, i16); -// impl_tuple_deserialize!(isize, i16); -// impl_tuple_serialize!(isize, i32); -// impl_tuple_deserialize!(isize, i32); -// impl_tuple_serialize!(isize, isize); -// impl_tuple_deserialize!(isize, isize); -// impl_tuple_serialize!(isize, f32); -// impl_tuple_deserialize!(isize, f32); -// impl_tuple_serialize!(isize, f64); -// impl_tuple_deserialize!(isize, f64); -// impl_tuple_serialize!(isize, bool); -// impl_tuple_deserialize!(isize, bool); -// impl_tuple_serialize!(isize, ZBuf); -// impl_tuple_deserialize!(isize, ZBuf); -// impl_tuple_serialize!(isize, Vec); -// impl_tuple_deserialize!(isize, Vec); -// impl_tuple_serialize!(isize, String); -// impl_tuple_deserialize!(isize, String); -// impl_tuple_serialize!(isize, &[u8]); -// impl_tuple_serialize!(f32, u8); -// impl_tuple_deserialize!(f32, u8); -// impl_tuple_serialize!(f32, u16); -// impl_tuple_deserialize!(f32, u16); -// impl_tuple_serialize!(f32, u32); -// impl_tuple_deserialize!(f32, u32); -// impl_tuple_serialize!(f32, u64); -// impl_tuple_deserialize!(f32, u64); -// impl_tuple_serialize!(f32, usize); -// impl_tuple_deserialize!(f32, usize); -// impl_tuple_serialize!(f32, i8); -// impl_tuple_deserialize!(f32, i8); -// impl_tuple_serialize!(f32, i16); -// impl_tuple_deserialize!(f32, i16); -// impl_tuple_serialize!(f32, i32); -// impl_tuple_deserialize!(f32, i32); -// impl_tuple_serialize!(f32, isize); -// impl_tuple_deserialize!(f32, isize); -// impl_tuple_serialize!(f32, f32); -// impl_tuple_deserialize!(f32, f32); -// impl_tuple_serialize!(f32, f64); -// impl_tuple_deserialize!(f32, f64); -// impl_tuple_serialize!(f32, bool); -// impl_tuple_deserialize!(f32, bool); -// impl_tuple_serialize!(f32, ZBuf); -// impl_tuple_deserialize!(f32, ZBuf); -// impl_tuple_serialize!(f32, Vec); -// impl_tuple_deserialize!(f32, Vec); -// impl_tuple_serialize!(f32, String); -// impl_tuple_deserialize!(f32, String); -// impl_tuple_serialize!(f32, &[u8]); -// impl_tuple_serialize!(f64, u8); -// impl_tuple_deserialize!(f64, u8); -// impl_tuple_serialize!(f64, u16); -// impl_tuple_deserialize!(f64, u16); -// impl_tuple_serialize!(f64, u32); -// impl_tuple_deserialize!(f64, u32); -// impl_tuple_serialize!(f64, u64); -// impl_tuple_deserialize!(f64, u64); -// impl_tuple_serialize!(f64, usize); -// impl_tuple_deserialize!(f64, usize); -// impl_tuple_serialize!(f64, i8); -// impl_tuple_deserialize!(f64, i8); -// impl_tuple_serialize!(f64, i16); -// impl_tuple_deserialize!(f64, i16); -// impl_tuple_serialize!(f64, i32); -// impl_tuple_deserialize!(f64, i32); -// impl_tuple_serialize!(f64, 
isize); -// impl_tuple_deserialize!(f64, isize); -// impl_tuple_serialize!(f64, f32); -// impl_tuple_deserialize!(f64, f32); -// impl_tuple_serialize!(f64, f64); -// impl_tuple_deserialize!(f64, f64); -// impl_tuple_serialize!(f64, bool); -// impl_tuple_deserialize!(f64, bool); -// impl_tuple_serialize!(f64, ZBuf); -// impl_tuple_deserialize!(f64, ZBuf); -// impl_tuple_serialize!(f64, Vec); -// impl_tuple_deserialize!(f64, Vec); -// impl_tuple_serialize!(f64, String); -// impl_tuple_deserialize!(f64, String); -// impl_tuple_serialize!(f64, &[u8]); -// impl_tuple_serialize!(bool, u8); -// impl_tuple_deserialize!(bool, u8); -// impl_tuple_serialize!(bool, u16); -// impl_tuple_deserialize!(bool, u16); -// impl_tuple_serialize!(bool, u32); -// impl_tuple_deserialize!(bool, u32); -// impl_tuple_serialize!(bool, u64); -// impl_tuple_deserialize!(bool, u64); -// impl_tuple_serialize!(bool, usize); -// impl_tuple_deserialize!(bool, usize); -// impl_tuple_serialize!(bool, i8); -// impl_tuple_deserialize!(bool, i8); -// impl_tuple_serialize!(bool, i16); -// impl_tuple_deserialize!(bool, i16); -// impl_tuple_serialize!(bool, i32); -// impl_tuple_deserialize!(bool, i32); -// impl_tuple_serialize!(bool, isize); -// impl_tuple_deserialize!(bool, isize); -// impl_tuple_serialize!(bool, f32); -// impl_tuple_deserialize!(bool, f32); -// impl_tuple_serialize!(bool, f64); -// impl_tuple_deserialize!(bool, f64); -// impl_tuple_serialize!(bool, bool); -// impl_tuple_deserialize!(bool, bool); -// impl_tuple_serialize!(bool, ZBuf); -// impl_tuple_deserialize!(bool, ZBuf); -// impl_tuple_serialize!(bool, Vec); -// impl_tuple_deserialize!(bool, Vec); -// impl_tuple_serialize!(bool, String); -// impl_tuple_deserialize!(bool, String); -// impl_tuple_serialize!(bool, &[u8]); -// impl_tuple_serialize!(ZBuf, u8); -// impl_tuple_deserialize!(ZBuf, u8); -// impl_tuple_serialize!(ZBuf, u16); -// impl_tuple_deserialize!(ZBuf, u16); -// impl_tuple_serialize!(ZBuf, u32); -// impl_tuple_deserialize!(ZBuf, u32); -// impl_tuple_serialize!(ZBuf, u64); -// impl_tuple_deserialize!(ZBuf, u64); -// impl_tuple_serialize!(ZBuf, usize); -// impl_tuple_deserialize!(ZBuf, usize); -// impl_tuple_serialize!(ZBuf, i8); -// impl_tuple_deserialize!(ZBuf, i8); -// impl_tuple_serialize!(ZBuf, i16); -// impl_tuple_deserialize!(ZBuf, i16); -// impl_tuple_serialize!(ZBuf, i32); -// impl_tuple_deserialize!(ZBuf, i32); -// impl_tuple_serialize!(ZBuf, isize); -// impl_tuple_deserialize!(ZBuf, isize); -// impl_tuple_serialize!(ZBuf, f32); -// impl_tuple_deserialize!(ZBuf, f32); -// impl_tuple_serialize!(ZBuf, f64); -// impl_tuple_deserialize!(ZBuf, f64); -// impl_tuple_serialize!(ZBuf, bool); -// impl_tuple_deserialize!(ZBuf, bool); -// impl_tuple_serialize!(ZBuf, ZBuf); -// impl_tuple_deserialize!(ZBuf, ZBuf); -// impl_tuple_serialize!(ZBuf, Vec); -// impl_tuple_deserialize!(ZBuf, Vec); -// impl_tuple_serialize!(ZBuf, String); -// impl_tuple_deserialize!(ZBuf, String); -// impl_tuple_serialize!(ZBuf, &[u8]); -// impl_tuple_serialize!(Vec, u8); -// impl_tuple_deserialize!(Vec, u8); -// impl_tuple_serialize!(Vec, u16); -// impl_tuple_deserialize!(Vec, u16); -// impl_tuple_serialize!(Vec, u32); -// impl_tuple_deserialize!(Vec, u32); -// impl_tuple_serialize!(Vec, u64); -// impl_tuple_deserialize!(Vec, u64); -// impl_tuple_serialize!(Vec, usize); -// impl_tuple_deserialize!(Vec, usize); -// impl_tuple_serialize!(Vec, i8); -// impl_tuple_deserialize!(Vec, i8); -// impl_tuple_serialize!(Vec, i16); -// impl_tuple_deserialize!(Vec, i16); -// 
impl_tuple_serialize!(Vec, i32); -// impl_tuple_deserialize!(Vec, i32); -// impl_tuple_serialize!(Vec, isize); -// impl_tuple_deserialize!(Vec, isize); -// impl_tuple_serialize!(Vec, f32); -// impl_tuple_deserialize!(Vec, f32); -// impl_tuple_serialize!(Vec, f64); -// impl_tuple_deserialize!(Vec, f64); -// impl_tuple_serialize!(Vec, bool); -// impl_tuple_deserialize!(Vec, bool); -// impl_tuple_serialize!(Vec, ZBuf); -// impl_tuple_deserialize!(Vec, ZBuf); -// impl_tuple_serialize!(Vec, Vec); -// impl_tuple_deserialize!(Vec, Vec); -// impl_tuple_serialize!(Vec, String); -// impl_tuple_deserialize!(Vec, String); -// impl_tuple_serialize!(Vec, &[u8]); -// impl_tuple_serialize!(String, u8); -// impl_tuple_deserialize!(String, u8); -// impl_tuple_serialize!(String, u16); -// impl_tuple_deserialize!(String, u16); -// impl_tuple_serialize!(String, u32); -// impl_tuple_deserialize!(String, u32); -// impl_tuple_serialize!(String, u64); -// impl_tuple_deserialize!(String, u64); -// impl_tuple_serialize!(String, usize); -// impl_tuple_deserialize!(String, usize); -// impl_tuple_serialize!(String, i8); -// impl_tuple_deserialize!(String, i8); -// impl_tuple_serialize!(String, i16); -// impl_tuple_deserialize!(String, i16); -// impl_tuple_serialize!(String, i32); -// impl_tuple_deserialize!(String, i32); -// impl_tuple_serialize!(String, isize); -// impl_tuple_deserialize!(String, isize); -// impl_tuple_serialize!(String, f32); -// impl_tuple_deserialize!(String, f32); -// impl_tuple_serialize!(String, f64); -// impl_tuple_deserialize!(String, f64); -// impl_tuple_serialize!(String, bool); -// impl_tuple_deserialize!(String, bool); -// impl_tuple_serialize!(String, ZBuf); -// impl_tuple_deserialize!(String, ZBuf); -// impl_tuple_serialize!(String, Vec); -// impl_tuple_deserialize!(String, Vec); -// impl_tuple_serialize!(String, String); -// impl_tuple_deserialize!(String, String); -// impl_tuple_serialize!(String, &[u8]); -// impl_tuple_serialize!(&[u8], u8); -// impl_tuple_serialize!(&[u8], u16); -// impl_tuple_serialize!(&[u8], u32); -// impl_tuple_serialize!(&[u8], u64); -// impl_tuple_serialize!(&[u8], usize); -// impl_tuple_serialize!(&[u8], i8); -// impl_tuple_serialize!(&[u8], i16); -// impl_tuple_serialize!(&[u8], i32); -// impl_tuple_serialize!(&[u8], isize); -// impl_tuple_serialize!(&[u8], f32); -// impl_tuple_serialize!(&[u8], f64); -// impl_tuple_serialize!(&[u8], bool); -// impl_tuple_serialize!(&[u8], ZBuf); -// impl_tuple_serialize!(&[u8], Vec); -// impl_tuple_serialize!(&[u8], String); -// impl_tuple_serialize!(&[u8], &[u8]); -// impl_iterator_serialize!(u8); -// impl_iterator_serialize!(u16); -// impl_iterator_serialize!(u32); -// impl_iterator_serialize!(u64); -// impl_iterator_serialize!(usize); -// impl_iterator_serialize!(i8); -// impl_iterator_serialize!(i16); -// impl_iterator_serialize!(i32); -// impl_iterator_serialize!(isize); -// impl_iterator_serialize!(f32); -// impl_iterator_serialize!(f64); -// impl_iterator_serialize!(bool); -// impl_iterator_serialize!(ZBuf); -// impl_iterator_serialize!(Vec); -// impl_iterator_serialize!(String); -// impl_iterator_serialize!(&[u8]); + let v: [usize; 5] = [0, 1, 2, 3, 4]; + let p = Payload::from_iter(v.iter()); + for (i, t) in p.iter::().enumerate() { + assert_eq!(i, t); + } + + use std::collections::HashMap; + let mut hm: HashMap = HashMap::new(); + hm.insert(0, 0); + hm.insert(1, 1); + let p = Payload::from_iter(hm.iter()); + // for (i, (k, v)) in p.iter::<(usize, usize)>().enumerate() { + // assert_eq!(i, k); + // assert_eq!(i, v); 
+ // } + } +} From a25676b4c468c408c31f74d2a896be315a1d7f1a Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 8 Apr 2024 20:08:36 +0200 Subject: [PATCH 178/357] Improve payload serde --- zenoh/src/payload.rs | 272 ++++++++++++++++++++++++++++++++----------- 1 file changed, 202 insertions(+), 70 deletions(-) diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index ed8c1b98c3..3c4709a6ae 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -211,6 +211,20 @@ impl From for Payload { } } +impl Serialize<&ZBuf> for ZSerde { + type Output = Payload; + + fn serialize(self, t: &ZBuf) -> Self::Output { + Payload::new(t.clone()) + } +} + +impl From<&ZBuf> for Payload { + fn from(t: &ZBuf) -> Self { + ZSerde.serialize(t) + } +} + impl Deserialize<'_, ZBuf> for ZSerde { type Error = Infallible; @@ -246,6 +260,20 @@ impl From> for Payload { } } +impl Serialize<&Vec> for ZSerde { + type Output = Payload; + + fn serialize(self, t: &Vec) -> Self::Output { + Payload::new(t.clone()) + } +} + +impl From<&Vec> for Payload { + fn from(t: &Vec) -> Self { + ZSerde.serialize(t) + } +} + impl Deserialize<'_, Vec> for ZSerde { type Error = Infallible; @@ -296,6 +324,20 @@ impl From> for Payload { } } +impl<'a> Serialize<&Cow<'a, [u8]>> for ZSerde { + type Output = Payload; + + fn serialize(self, t: &Cow<'a, [u8]>) -> Self::Output { + Payload::new(t.to_vec()) + } +} + +impl From<&Cow<'_, [u8]>> for Payload { + fn from(t: &Cow<'_, [u8]>) -> Self { + ZSerde.serialize(t) + } +} + impl<'a> Deserialize<'a, Cow<'a, [u8]>> for ZSerde { type Error = Infallible; @@ -325,6 +367,20 @@ impl From for Payload { } } +impl Serialize<&String> for ZSerde { + type Output = Payload; + + fn serialize(self, s: &String) -> Self::Output { + Payload::new(s.clone().into_bytes()) + } +} + +impl From<&String> for Payload { + fn from(t: &String) -> Self { + ZSerde.serialize(t) + } +} + impl Deserialize<'_, String> for ZSerde { type Error = FromUtf8Error; @@ -380,6 +436,20 @@ impl From> for Payload { } } +impl<'a> Serialize<&Cow<'a, str>> for ZSerde { + type Output = Payload; + + fn serialize(self, s: &Cow<'a, str>) -> Self::Output { + Self.serialize(s.to_string()) + } +} + +impl From<&Cow<'_, str>> for Payload { + fn from(t: &Cow<'_, str>) -> Self { + ZSerde.serialize(t) + } +} + impl<'a> Deserialize<'a, Cow<'a, str>> for ZSerde { type Error = FromUtf8Error; @@ -437,20 +507,6 @@ macro_rules! 
impl_int { } } - impl Serialize<&mut $t> for ZSerde { - type Output = Payload; - - fn serialize(self, t: &mut $t) -> Self::Output { - ZSerde.serialize(*t) - } - } - - impl From<&mut $t> for Payload { - fn from(t: &mut $t) -> Self { - ZSerde.serialize(t) - } - } - impl<'a> Deserialize<'a, $t> for ZSerde { type Error = ZDeserializeError; @@ -522,6 +578,20 @@ impl From for Payload { } } +impl Serialize<&bool> for ZSerde { + type Output = Payload; + + fn serialize(self, t: &bool) -> Self::Output { + ZSerde.serialize(*t) + } +} + +impl From<&bool> for Payload { + fn from(t: &bool) -> Self { + ZSerde.serialize(t) + } +} + impl Deserialize<'_, bool> for ZSerde { type Error = ZDeserializeError; @@ -535,6 +605,14 @@ impl Deserialize<'_, bool> for ZSerde { } } +impl TryFrom for bool { + type Error = ZDeserializeError; + + fn try_from(value: Payload) -> Result { + ZSerde.deserialize(&value) + } +} + impl TryFrom<&Payload> for bool { type Error = ZDeserializeError; @@ -545,36 +623,36 @@ impl TryFrom<&Payload> for bool { // - Zenoh advanced types encoders/decoders // JSON -impl Serialize<&serde_json::Value> for ZSerde { +impl Serialize for ZSerde { type Output = Result; - fn serialize(self, t: &serde_json::Value) -> Self::Output { - let mut payload = Payload::empty(); - serde_json::to_writer(payload.0.writer(), t)?; - Ok(payload) + fn serialize(self, t: serde_json::Value) -> Self::Output { + ZSerde.serialize(&t) } } -impl TryFrom<&serde_json::Value> for Payload { +impl TryFrom for Payload { type Error = serde_json::Error; - fn try_from(value: &serde_json::Value) -> Result { - ZSerde.serialize(value) + fn try_from(value: serde_json::Value) -> Result { + ZSerde.serialize(&value) } } -impl Serialize for ZSerde { +impl Serialize<&serde_json::Value> for ZSerde { type Output = Result; - fn serialize(self, t: serde_json::Value) -> Self::Output { - Self.serialize(&t) + fn serialize(self, t: &serde_json::Value) -> Self::Output { + let mut payload = Payload::empty(); + serde_json::to_writer(payload.0.writer(), t)?; + Ok(payload) } } -impl TryFrom for Payload { +impl TryFrom<&serde_json::Value> for Payload { type Error = serde_json::Error; - fn try_from(value: serde_json::Value) -> Result { + fn try_from(value: &serde_json::Value) -> Result { ZSerde.serialize(value) } } @@ -587,6 +665,14 @@ impl Deserialize<'_, serde_json::Value> for ZSerde { } } +impl TryFrom for serde_json::Value { + type Error = serde_json::Error; + + fn try_from(value: Payload) -> Result { + ZSerde.deserialize(&value) + } +} + impl TryFrom<&Payload> for serde_json::Value { type Error = serde_json::Error; @@ -596,36 +682,36 @@ impl TryFrom<&Payload> for serde_json::Value { } // Yaml -impl Serialize<&serde_yaml::Value> for ZSerde { +impl Serialize for ZSerde { type Output = Result; - fn serialize(self, t: &serde_yaml::Value) -> Self::Output { - let mut payload = Payload::empty(); - serde_yaml::to_writer(payload.0.writer(), t)?; - Ok(payload) + fn serialize(self, t: serde_yaml::Value) -> Self::Output { + Self.serialize(&t) } } -impl TryFrom<&serde_yaml::Value> for Payload { +impl TryFrom for Payload { type Error = serde_yaml::Error; - fn try_from(value: &serde_yaml::Value) -> Result { + fn try_from(value: serde_yaml::Value) -> Result { ZSerde.serialize(value) } } -impl Serialize for ZSerde { +impl Serialize<&serde_yaml::Value> for ZSerde { type Output = Result; - fn serialize(self, t: serde_yaml::Value) -> Self::Output { - Self.serialize(&t) + fn serialize(self, t: &serde_yaml::Value) -> Self::Output { + let mut payload = Payload::empty(); + 
serde_yaml::to_writer(payload.0.writer(), t)?; + Ok(payload) } } -impl TryFrom for Payload { +impl TryFrom<&serde_yaml::Value> for Payload { type Error = serde_yaml::Error; - fn try_from(value: serde_yaml::Value) -> Result { + fn try_from(value: &serde_yaml::Value) -> Result { ZSerde.serialize(value) } } @@ -638,6 +724,14 @@ impl Deserialize<'_, serde_yaml::Value> for ZSerde { } } +impl TryFrom for serde_yaml::Value { + type Error = serde_yaml::Error; + + fn try_from(value: Payload) -> Result { + ZSerde.deserialize(&value) + } +} + impl TryFrom<&Payload> for serde_yaml::Value { type Error = serde_yaml::Error; @@ -647,36 +741,36 @@ impl TryFrom<&Payload> for serde_yaml::Value { } // CBOR -impl Serialize<&serde_cbor::Value> for ZSerde { +impl Serialize for ZSerde { type Output = Result; - fn serialize(self, t: &serde_cbor::Value) -> Self::Output { - let mut payload = Payload::empty(); - serde_cbor::to_writer(payload.0.writer(), t)?; - Ok(payload) + fn serialize(self, t: serde_cbor::Value) -> Self::Output { + Self.serialize(&t) } } -impl TryFrom<&serde_cbor::Value> for Payload { +impl TryFrom for Payload { type Error = serde_cbor::Error; - fn try_from(value: &serde_cbor::Value) -> Result { + fn try_from(value: serde_cbor::Value) -> Result { ZSerde.serialize(value) } } -impl Serialize for ZSerde { +impl Serialize<&serde_cbor::Value> for ZSerde { type Output = Result; - fn serialize(self, t: serde_cbor::Value) -> Self::Output { - Self.serialize(&t) + fn serialize(self, t: &serde_cbor::Value) -> Self::Output { + let mut payload = Payload::empty(); + serde_cbor::to_writer(payload.0.writer(), t)?; + Ok(payload) } } -impl TryFrom for Payload { +impl TryFrom<&serde_cbor::Value> for Payload { type Error = serde_cbor::Error; - fn try_from(value: serde_cbor::Value) -> Result { + fn try_from(value: &serde_cbor::Value) -> Result { ZSerde.serialize(value) } } @@ -689,6 +783,14 @@ impl Deserialize<'_, serde_cbor::Value> for ZSerde { } } +impl TryFrom for serde_cbor::Value { + type Error = serde_cbor::Error; + + fn try_from(value: Payload) -> Result { + ZSerde.deserialize(&value) + } +} + impl TryFrom<&Payload> for serde_cbor::Value { type Error = serde_cbor::Error; @@ -698,6 +800,22 @@ impl TryFrom<&Payload> for serde_cbor::Value { } // Pickle +impl Serialize for ZSerde { + type Output = Result; + + fn serialize(self, t: serde_pickle::Value) -> Self::Output { + Self.serialize(&t) + } +} + +impl TryFrom for Payload { + type Error = serde_pickle::Error; + + fn try_from(value: serde_pickle::Value) -> Result { + ZSerde.serialize(value) + } +} + impl Serialize<&serde_pickle::Value> for ZSerde { type Output = Result; @@ -720,27 +838,19 @@ impl TryFrom<&serde_pickle::Value> for Payload { } } -impl Serialize for ZSerde { - type Output = Result; - - fn serialize(self, t: serde_pickle::Value) -> Self::Output { - Self.serialize(&t) - } -} - -impl TryFrom for Payload { +impl Deserialize<'_, serde_pickle::Value> for ZSerde { type Error = serde_pickle::Error; - fn try_from(value: serde_pickle::Value) -> Result { - ZSerde.serialize(value) + fn deserialize(self, v: &Payload) -> Result { + serde_pickle::value_from_reader(v.reader(), serde_pickle::DeOptions::default()) } } -impl Deserialize<'_, serde_pickle::Value> for ZSerde { +impl TryFrom for serde_pickle::Value { type Error = serde_pickle::Error; - fn deserialize(self, v: &Payload) -> Result { - serde_pickle::value_from_reader(v.reader(), serde_pickle::DeOptions::default()) + fn try_from(value: Payload) -> Result { + ZSerde.deserialize(&value) } } @@ -761,6 +871,12 @@ 
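// A minimal usage sketch (illustrative only, not part of the original commit): the point of
// the owned-value impls added in this commit is that `serde_json::Value` (and likewise the
// yaml/cbor/pickle values) now converts with `TryFrom`/`TryInto` in both directions without
// borrowing. The sketch assumes the `serde_json::json!` macro is available in the test
// context and that `Payload` is in scope as in this file's `tests` module.
#[test]
fn payload_json_round_trip() -> Result<(), serde_json::Error> {
    let v = serde_json::json!({ "k": 1, "ok": true });
    // Serialization writes the JSON text into the payload buffer.
    let p = Payload::try_from(v.clone())?;
    // Deserialization parses it back with serde_json.
    let o = serde_json::Value::try_from(p)?;
    assert_eq!(v, o);
    Ok(())
}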
impl Serialize> for ZSerde { Payload::new(t) } } +#[cfg(feature = "shared-memory")] +impl From> for Payload { + fn from(t: Arc) -> Self { + ZSerde.serialize(t) + } +} #[cfg(feature = "shared-memory")] impl Serialize> for ZSerde { @@ -772,6 +888,13 @@ impl Serialize> for ZSerde { } } +#[cfg(feature = "shared-memory")] +impl From> for Payload { + fn from(t: Box) -> Self { + ZSerde.serialize(t) + } +} + #[cfg(feature = "shared-memory")] impl Serialize for ZSerde { type Output = Payload; @@ -781,6 +904,13 @@ impl Serialize for ZSerde { } } +#[cfg(feature = "shared-memory")] +impl From for Payload { + fn from(t: SharedMemoryBuf) -> Self { + ZSerde.serialize(t) + } +} + // Tuple impl Serialize<(A, B)> for ZSerde where @@ -859,7 +989,7 @@ where } } -// For convenience to always convert a Value the examples +// For convenience to always convert a Value in the examples #[derive(Debug, Clone, PartialEq, Eq)] pub enum StringOrBase64 { String(String), @@ -1000,7 +1130,9 @@ mod tests { // Iterator let v: [usize; 5] = [0, 1, 2, 3, 4]; + println!("Serialize:\t{:?}", v); let p = Payload::from_iter(v.iter()); + println!("Deerialize:\t{:?}", p); for (i, t) in p.iter::().enumerate() { assert_eq!(i, t); } @@ -1009,10 +1141,10 @@ mod tests { let mut hm: HashMap = HashMap::new(); hm.insert(0, 0); hm.insert(1, 1); + println!("Serialize:\t{:?}", hm); let p = Payload::from_iter(hm.iter()); - // for (i, (k, v)) in p.iter::<(usize, usize)>().enumerate() { - // assert_eq!(i, k); - // assert_eq!(i, v); - // } + println!("Deerialize:\t{:?}", p); + let o: HashMap = HashMap::from_iter(p.iter()); + assert_eq!(hm, o); } } From d0246076a3260e40a0df4fc0d0c2357126a37793 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 9 Apr 2024 09:38:42 +0200 Subject: [PATCH 179/357] [u8;N] payload support. from_reader functionality. --- zenoh/src/payload.rs | 88 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 85 insertions(+), 3 deletions(-) diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index 3c4709a6ae..a65843dcaf 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -14,6 +14,7 @@ //! Payload primitives. use crate::buffers::ZBuf; +use std::io::Read; use std::marker::PhantomData; use std::{ borrow::Cow, convert::Infallible, fmt::Debug, ops::Deref, string::FromUtf8Error, sync::Arc, @@ -79,6 +80,16 @@ impl Payload { PayloadReader(self.0.reader()) } + /// Build a [`Payload`] from a [`Reader`]. This operation copies data from the reader. + pub fn from_reader(mut reader: R) -> Result + where + R: std::io::Read, + { + let mut buf: Vec = vec![]; + reader.read_to_end(&mut buf)?; + Ok(Payload::new(buf)) + } + /// Get a [`PayloadReader`] implementing [`std::io::Read`] trait. pub fn iter<'a, T>(&'a self) -> PayloadIterator<'a, T> where @@ -91,10 +102,7 @@ impl Payload { _t: PhantomData::, } } -} -/// Provide some facilities specific to the Rust API to encode/decode a [`Value`] with an `Serialize`. -impl Payload { /// Encode an object of type `T` as a [`Value`] using the [`ZSerde`]. /// /// ```rust @@ -125,6 +133,8 @@ impl Payload { } /// A reader that implements [`std::io::Read`] trait to read from a [`Payload`]. +#[repr(transparent)] +#[derive(Debug)] pub struct PayloadReader<'a>(ZBufReader<'a>); impl std::io::Read for PayloadReader<'_> { @@ -135,6 +145,8 @@ impl std::io::Read for PayloadReader<'_> { /// An iterator that implements [`std::iter::Iterator`] trait to iterate on values `T` in a [`Payload`]. 
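// A minimal usage sketch (illustrative only, not part of the original commit): `from_reader`,
// added in this commit, copies every byte of any `std::io::Read` into a new payload, and
// `reader()` hands the bytes back through a reader. The exact error type of `from_reader` is
// elided in the hunk above; this sketch assumes it is `std::io::Error`, consistent with the
// `read_to_end` call in its body, and assumes `Payload` is in scope as in the `tests` module.
#[test]
fn payload_from_reader_round_trip() -> std::io::Result<()> {
    use std::io::Read;
    let bytes = vec![1u8, 2, 3, 4];
    // Any reader works; a Cursor over a Vec<u8> keeps the example self-contained.
    let p = Payload::from_reader(std::io::Cursor::new(bytes.clone()))?;
    let mut out = Vec::new();
    p.reader().read_to_end(&mut out)?;
    assert_eq!(bytes, out);
    Ok(())
}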
/// Note that [`Payload`] contains a serialized version of `T` and iterating over a [`Payload`] performs lazy deserialization. +#[repr(transparent)] +#[derive(Debug)] pub struct PayloadIterator<'a, T> where ZSerde: Deserialize<'a, T>, @@ -245,6 +257,65 @@ impl From<&Payload> for ZBuf { } } +// [u8; N] +impl Serialize<[u8; N]> for ZSerde { + type Output = Payload; + + fn serialize(self, t: [u8; N]) -> Self::Output { + Payload::new(t) + } +} + +impl From<[u8; N]> for Payload { + fn from(t: [u8; N]) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&[u8; N]> for ZSerde { + type Output = Payload; + + fn serialize(self, t: &[u8; N]) -> Self::Output { + Payload::new(*t) + } +} + +impl From<&[u8; N]> for Payload { + fn from(t: &[u8; N]) -> Self { + ZSerde.serialize(t) + } +} + +impl Deserialize<'_, [u8; N]> for ZSerde { + type Error = ZDeserializeError; + + fn deserialize(self, v: &Payload) -> Result<[u8; N], Self::Error> { + if v.0.len() != N { + return Err(ZDeserializeError); + } + let mut dst = [0u8; N]; + let mut reader = v.reader(); + reader.read_exact(&mut dst).map_err(|_| ZDeserializeError)?; + Ok(dst) + } +} + +impl TryFrom for [u8; N] { + type Error = ZDeserializeError; + + fn try_from(value: Payload) -> Result { + ZSerde.deserialize(&value) + } +} + +impl TryFrom<&Payload> for [u8; N] { + type Error = ZDeserializeError; + + fn try_from(value: &Payload) -> Result { + ZSerde.deserialize(value) + } +} + // Vec impl Serialize> for ZSerde { type Output = Payload; @@ -1137,6 +1208,17 @@ mod tests { assert_eq!(i, t); } + let mut v = vec![[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]; + println!("Serialize:\t{:?}", v); + let p = Payload::from_iter(v.drain(..)); + println!("Deerialize:\t{:?}", p); + let mut iter = p.iter::<[u8; 4]>(); + assert_eq!(iter.next().unwrap(), [0, 1, 2, 3]); + assert_eq!(iter.next().unwrap(), [4, 5, 6, 7]); + assert_eq!(iter.next().unwrap(), [8, 9, 10, 11]); + assert_eq!(iter.next().unwrap(), [12, 13, 14, 15]); + assert!(iter.next().is_none()); + use std::collections::HashMap; let mut hm: HashMap = HashMap::new(); hm.insert(0, 0); From 2a6bade7cc2d932cee30c18f97848c74511097cd Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 9 Apr 2024 09:45:05 +0200 Subject: [PATCH 180/357] Improve payload test --- zenoh/src/payload.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index a65843dcaf..4899dd97e6 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -1224,9 +1224,9 @@ mod tests { hm.insert(0, 0); hm.insert(1, 1); println!("Serialize:\t{:?}", hm); - let p = Payload::from_iter(hm.iter()); + let p = Payload::from_iter(hm.drain()); println!("Deerialize:\t{:?}", p); - let o: HashMap = HashMap::from_iter(p.iter()); + let o = HashMap::from_iter(p.iter::<(usize, usize)>()); assert_eq!(hm, o); } } From 6793a6b8741fc055633c28e568c2fc8237abbeea Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 9 Apr 2024 12:20:09 +0200 Subject: [PATCH 181/357] Payload zserde improvement --- zenoh/src/payload.rs | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index 4899dd97e6..59ad8b79b5 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -540,7 +540,7 @@ impl<'a> TryFrom<&'a Payload> for Cow<'a, str> { // - Integers impl macro_rules! impl_int { - ($t:ty, $encoding:expr) => { + ($t:ty) => { impl Serialize<$t> for ZSerde { type Output = Payload; @@ -615,22 +615,22 @@ macro_rules! 
impl_int { } // Zenoh unsigned integers -impl_int!(u8, ZSerde::ZENOH_UINT); -impl_int!(u16, ZSerde::ZENOH_UINT); -impl_int!(u32, ZSerde::ZENOH_UINT); -impl_int!(u64, ZSerde::ZENOH_UINT); -impl_int!(usize, ZSerde::ZENOH_UINT); +impl_int!(u8); +impl_int!(u16); +impl_int!(u32); +impl_int!(u64); +impl_int!(usize); // Zenoh signed integers -impl_int!(i8, ZSerde::ZENOH_INT); -impl_int!(i16, ZSerde::ZENOH_INT); -impl_int!(i32, ZSerde::ZENOH_INT); -impl_int!(i64, ZSerde::ZENOH_INT); -impl_int!(isize, ZSerde::ZENOH_INT); +impl_int!(i8); +impl_int!(i16); +impl_int!(i32); +impl_int!(i64); +impl_int!(isize); // Zenoh floats -impl_int!(f32, ZSerde::ZENOH_FLOAT); -impl_int!(f64, ZSerde::ZENOH_FLOAT); +impl_int!(f32); +impl_int!(f64); // Zenoh bool impl Serialize for ZSerde { @@ -1203,7 +1203,7 @@ mod tests { let v: [usize; 5] = [0, 1, 2, 3, 4]; println!("Serialize:\t{:?}", v); let p = Payload::from_iter(v.iter()); - println!("Deerialize:\t{:?}", p); + println!("Deserialize:\t{:?}", p); for (i, t) in p.iter::().enumerate() { assert_eq!(i, t); } @@ -1211,7 +1211,7 @@ mod tests { let mut v = vec![[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]; println!("Serialize:\t{:?}", v); let p = Payload::from_iter(v.drain(..)); - println!("Deerialize:\t{:?}", p); + println!("Deserialize:\t{:?}", p); let mut iter = p.iter::<[u8; 4]>(); assert_eq!(iter.next().unwrap(), [0, 1, 2, 3]); assert_eq!(iter.next().unwrap(), [4, 5, 6, 7]); @@ -1225,7 +1225,7 @@ mod tests { hm.insert(1, 1); println!("Serialize:\t{:?}", hm); let p = Payload::from_iter(hm.drain()); - println!("Deerialize:\t{:?}", p); + println!("Deserialize:\t{:?}", p); let o = HashMap::from_iter(p.iter::<(usize, usize)>()); assert_eq!(hm, o); } From 7240f0169556a66fb4abca47dcfbcce736a01e53 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 9 Apr 2024 12:22:08 +0200 Subject: [PATCH 182/357] Fix encoding doc: suffix to schema --- commons/zenoh-codec/src/core/encoding.rs | 4 ++-- commons/zenoh-protocol/src/core/encoding.rs | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/commons/zenoh-codec/src/core/encoding.rs b/commons/zenoh-codec/src/core/encoding.rs index cfbe0084ba..c8033cdd5f 100644 --- a/commons/zenoh-codec/src/core/encoding.rs +++ b/commons/zenoh-codec/src/core/encoding.rs @@ -62,13 +62,13 @@ where fn read(self, reader: &mut R) -> Result { let zodec = Zenoh080Bounded::::new(); let id: u32 = zodec.read(&mut *reader)?; - let (id, has_suffix) = ( + let (id, has_schema) = ( (id >> 1) as EncodingId, imsg::has_flag(id as u8, flag::S as u8), ); let mut schema = None; - if has_suffix { + if has_schema { let zodec = Zenoh080Bounded::::new(); schema = Some(zodec.read(&mut *reader)?); } diff --git a/commons/zenoh-protocol/src/core/encoding.rs b/commons/zenoh-protocol/src/core/encoding.rs index 9b9aa5bf2f..70afdbf143 100644 --- a/commons/zenoh-protocol/src/core/encoding.rs +++ b/commons/zenoh-protocol/src/core/encoding.rs @@ -18,8 +18,8 @@ pub type EncodingId = u16; /// [`Encoding`] is a metadata that indicates how the data payload should be interpreted. /// For wire-efficiency and extensibility purposes, Zenoh defines an [`Encoding`] as -/// composed of an unsigned integer prefix and a string suffix. The actual meaning of the -/// prefix and suffix are out-of-scope of the protocol definition. Therefore, Zenoh does not +/// composed of an unsigned integer prefix and a bytes schema. The actual meaning of the +/// prefix and schema are out-of-scope of the protocol definition. 
Therefore, Zenoh does not /// impose any encoding mapping and users are free to use any mapping they like. /// Nevertheless, it is worth highlighting that Zenoh still provides a default mapping as part /// of the API as per user convenience. That mapping has no impact on the Zenoh protocol definition. @@ -40,7 +40,7 @@ pub struct Encoding { /// +---------------+ /// ``` pub mod flag { - pub const S: u32 = 1; // 0x01 Suffix if S==1 then suffix is present + pub const S: u32 = 1; // 0x01 Suffix if S==1 then schema is present } impl Encoding { From ccf48c38167dc2e6b4a6cb42974dc70e5c1b98de Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 9 Apr 2024 14:09:47 +0200 Subject: [PATCH 183/357] buffer reader exported --- zenoh/src/lib.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index f071360567..cb25ab8efc 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -106,8 +106,8 @@ pub const FEATURES: &str = zenoh_util::concat_enabled_features!( ); // Expose some functions directly to root `zenoh::`` namespace for convenience -pub use crate::api::session::open; pub use crate::api::scouting::scout; +pub use crate::api::session::open; pub mod prelude; @@ -127,7 +127,10 @@ pub mod core { /// reading and writing data. pub mod buffers { pub use zenoh_buffers::buffer::SplitBuffer; - pub use zenoh_buffers::{ZBuf, ZSlice}; + pub use zenoh_buffers::reader::HasReader; + pub use zenoh_buffers::reader::Reader; + pub use zenoh_buffers::ZBufReader; + pub use zenoh_buffers::{ZBuf, ZSlice, ZSliceBuffer}; } /// [Key expression](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Key%20Expressions.md) are Zenoh's address space. @@ -251,6 +254,7 @@ pub mod subscriber { /// Publishing primitives pub mod publication { pub use crate::api::builders::publication::PublisherBuilder; + pub use crate::api::publication::MatchingListener; pub use crate::api::publication::Priority; pub use crate::api::publication::Publisher; #[zenoh_macros::unstable] From 989509c447df98771abcfaea786e203e885db5b5 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 9 Apr 2024 17:11:28 +0200 Subject: [PATCH 184/357] impl Seek for ZBufReader --- commons/zenoh-buffers/src/zbuf.rs | 144 +++++++++++++++++++++++++++--- zenoh/src/payload.rs | 7 +- 2 files changed, 134 insertions(+), 17 deletions(-) diff --git a/commons/zenoh-buffers/src/zbuf.rs b/commons/zenoh-buffers/src/zbuf.rs index fd86f454af..6fded06ae7 100644 --- a/commons/zenoh-buffers/src/zbuf.rs +++ b/commons/zenoh-buffers/src/zbuf.rs @@ -21,6 +21,8 @@ use crate::{ }; use alloc::{sync::Arc, vec::Vec}; use core::{cmp, iter, mem, num::NonZeroUsize, ops::RangeBounds, ptr}; +#[cfg(feature = "std")] +use std::io; use zenoh_collections::SingleOrVec; fn get_mut_unchecked(arc: &mut Arc) -> &mut T { @@ -270,7 +272,7 @@ impl<'a> Reader for ZBufReader<'a> { } fn read_exact(&mut self, into: &mut [u8]) -> Result<(), DidntRead> { - let len = self.read(into)?; + let len = Reader::read(self, into)?; if len.get() == into.len() { Ok(()) } else { @@ -317,7 +319,7 @@ impl<'a> Reader for ZBufReader<'a> { match (slice.len() - self.cursor.byte).cmp(&len) { cmp::Ordering::Less => { let mut buffer = crate::vec::uninit(len); - self.read_exact(&mut buffer)?; + Reader::read_exact(self, &mut buffer)?; Ok(buffer.into()) } cmp::Ordering::Equal => { @@ -388,18 +390,58 @@ impl<'a> SiphonableReader for ZBufReader<'a> { } #[cfg(feature = "std")] -impl<'a> std::io::Read for ZBufReader<'a> { - fn read(&mut self, buf: &mut [u8]) -> std::io::Result { 
+impl<'a> io::Read for ZBufReader<'a> { + fn read(&mut self, buf: &mut [u8]) -> io::Result { match ::read(self, buf) { Ok(n) => Ok(n.get()), - Err(_) => Err(std::io::Error::new( - std::io::ErrorKind::UnexpectedEof, + Err(_) => Err(io::Error::new( + io::ErrorKind::UnexpectedEof, "UnexpectedEof", )), } } } +#[cfg(feature = "std")] +impl<'a> io::Seek for ZBufReader<'a> { + fn seek(&mut self, pos: io::SeekFrom) -> io::Result { + // Compute the index + let len = self.inner.len(); + let index = match pos { + io::SeekFrom::Start(pos) => pos.try_into().unwrap_or(i64::MAX), + io::SeekFrom::End(pos) => { + pos + i64::try_from(len) + .map_err(|e| io::Error::new(io::ErrorKind::UnexpectedEof, e))? + } + io::SeekFrom::Current(pos) => { + pos + i64::try_from(len - self.remaining()) + .map_err(|e| io::Error::new(io::ErrorKind::UnexpectedEof, e))? + } + }; + + let index = usize::try_from(index) + .map_err(|e| io::Error::new(io::ErrorKind::UnexpectedEof, e))? + .min(len); + + // Seek the position + let mut left = index; + let mut pos = ZBufPos { slice: 0, byte: 0 }; + while let Some(slice) = self.inner.slices.get(pos.slice) { + let len = slice.len(); + if len >= left { + pos.byte = left; + self.cursor = pos; + return Ok(index as u64); + } else { + left -= len; + } + pos.slice += 1; + } + + Err(io::ErrorKind::UnexpectedEof.into()) + } +} + // ZSlice iterator pub struct ZBufSliceIterator<'a, 'b> { reader: &'a mut ZBufReader<'b>, @@ -614,18 +656,18 @@ impl BacktrackableWriter for ZBufWriter<'_> { } #[cfg(feature = "std")] -impl<'a> std::io::Write for ZBufWriter<'a> { - fn write(&mut self, buf: &[u8]) -> std::io::Result { +impl<'a> io::Write for ZBufWriter<'a> { + fn write(&mut self, buf: &[u8]) -> io::Result { match ::write(self, buf) { Ok(n) => Ok(n.get()), - Err(_) => Err(std::io::Error::new( - std::io::ErrorKind::UnexpectedEof, + Err(_) => Err(io::Error::new( + io::ErrorKind::UnexpectedEof, "UnexpectedEof", )), } } - fn flush(&mut self) -> std::io::Result<()> { + fn flush(&mut self) -> io::Result<()> { Ok(()) } } @@ -668,4 +710,84 @@ mod tests { assert_eq!(zbuf1, zbuf2); } + + #[cfg(feature = "std")] + #[test] + fn zbuf_seek() { + use crate::reader::HasReader; + use std::io::{Seek, SeekFrom}; + + use super::{ZBuf, ZSlice}; + + let slice: ZSlice = [0u8, 1, 2, 3, 4, 5, 6, 7].to_vec().into(); + + let mut zbuf = ZBuf::empty(); + zbuf.push_zslice(slice.subslice(0, 1).unwrap()); + zbuf.push_zslice(slice.subslice(1, 4).unwrap()); + zbuf.push_zslice(slice.subslice(4, 8).unwrap()); + + let mut reader = zbuf.reader(); + + let index = reader.seek(SeekFrom::Start(0)).unwrap(); + assert_eq!(index, 0); + assert_eq!(index, reader.stream_position().unwrap()); + + let index = reader.seek(SeekFrom::Start(4)).unwrap(); + assert_eq!(index, 4); + assert_eq!(index, reader.stream_position().unwrap()); + + let index = reader.seek(SeekFrom::Start(8)).unwrap(); + assert_eq!(index, 8); + assert_eq!(index, reader.stream_position().unwrap()); + + let index = reader.seek(SeekFrom::Start(u64::MAX)).unwrap(); + assert_eq!(index, 8); + assert_eq!(index, reader.stream_position().unwrap()); + + let index = reader.seek(SeekFrom::End(0)).unwrap(); + assert_eq!(index, 8); + assert_eq!(index, reader.stream_position().unwrap()); + + let index = reader.seek(SeekFrom::End(-4)).unwrap(); + assert_eq!(index, 4); + assert_eq!(index, reader.stream_position().unwrap()); + + let index = reader.seek(SeekFrom::End(-8)).unwrap(); + assert_eq!(index, 0); + assert_eq!(index, reader.stream_position().unwrap()); + + 
reader.seek(SeekFrom::End(i64::MIN)).unwrap_err(); + assert_eq!(index, reader.stream_position().unwrap()); + + let index = reader.seek(SeekFrom::Start(0)).unwrap(); + assert_eq!(index, 0); + assert_eq!(index, reader.stream_position().unwrap()); + + reader.seek(SeekFrom::Current(-1)).unwrap_err(); + assert_eq!(index, reader.stream_position().unwrap()); + + let index = reader.seek(SeekFrom::Current(2)).unwrap(); + assert_eq!(index, 2); + assert_eq!(index, reader.stream_position().unwrap()); + + let index = reader.seek(SeekFrom::Current(2)).unwrap(); + assert_eq!(index, 4); + assert_eq!(index, reader.stream_position().unwrap()); + + let index = reader.seek(SeekFrom::Current(-2)).unwrap(); + assert_eq!(index, 2); + assert_eq!(index, reader.stream_position().unwrap()); + + let index = reader.seek(SeekFrom::Current(-2)).unwrap(); + assert_eq!(index, 0); + assert_eq!(index, reader.stream_position().unwrap()); + + let index = reader.seek(SeekFrom::Current(i64::MAX)).unwrap(); + assert_eq!(index, 8); + assert_eq!(index, reader.stream_position().unwrap()); + + let index = reader.seek(SeekFrom::Current(-1)).unwrap(); + assert_eq!(index, 7); + assert_eq!(index, reader.stream_position().unwrap()); + } } diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index 59ad8b79b5..4de36f2d94 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -22,7 +22,7 @@ use std::{ use unwrap_infallible::UnwrapInfallible; use zenoh_buffers::{ buffer::{Buffer, SplitBuffer}, - reader::{HasReader, Reader}, + reader::HasReader, writer::HasWriter, ZBufReader, ZSlice, }; @@ -171,11 +171,6 @@ where let t = ZSerde.deserialize(&kpld).ok()?; Some(t) } - - fn size_hint(&self) -> (usize, Option) { - let remaining = self.reader.remaining(); - (remaining, Some(remaining)) - } } impl FromIterator for Payload From 140526b6881ef3ddcc7536ccf879cd86692e36bf Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 9 Apr 2024 17:14:18 +0200 Subject: [PATCH 185/357] impl Seek for PayloadReader --- zenoh/src/payload.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index 4de36f2d94..7e42b4564a 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -14,7 +14,6 @@ //! Payload primitives. use crate::buffers::ZBuf; -use std::io::Read; use std::marker::PhantomData; use std::{ borrow::Cow, convert::Infallible, fmt::Debug, ops::Deref, string::FromUtf8Error, sync::Arc, @@ -143,6 +142,12 @@ impl std::io::Read for PayloadReader<'_> { } } +impl std::io::Seek for PayloadReader<'_> { + fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result { + std::io::Seek::seek(&mut self.0, pos) + } +} + /// An iterator that implements [`std::iter::Iterator`] trait to iterate on values `T` in a [`Payload`]. /// Note that [`Payload`] contains a serialized version of `T` and iterating over a [`Payload`] performs lazy deserialization. 
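(Editorial sketch, not part of the patch.) With `std::io::Read` on `PayloadReader` (PATCH 179) and `std::io::Seek` added here, a payload built from any reader can be rewound and re-read. The `zenoh::payload` module path below assumes the re-exports introduced earlier in this series, and that `Payload::from_reader` surfaces `std::io::Error`, as its use of `read_to_end` suggests:

```rust
use std::io::{Cursor, Read, Seek, SeekFrom};
use zenoh::payload::Payload;

fn payload_reader_demo() -> std::io::Result<()> {
    // Copy bytes out of any std::io::Read source into a Payload.
    let payload = Payload::from_reader(Cursor::new(vec![1u8, 2, 3, 4]))?;

    // Read the first two bytes, rewind, then read everything again.
    let mut reader = payload.reader();
    let mut head = [0u8; 2];
    reader.read_exact(&mut head)?;
    assert_eq!(head, [1, 2]);

    reader.seek(SeekFrom::Start(0))?;
    let mut all = [0u8; 4];
    reader.read_exact(&mut all)?;
    assert_eq!(all, [1, 2, 3, 4]);
    Ok(())
}
```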
#[repr(transparent)] @@ -285,6 +290,8 @@ impl Deserialize<'_, [u8; N]> for ZSerde { type Error = ZDeserializeError; fn deserialize(self, v: &Payload) -> Result<[u8; N], Self::Error> { + use std::io::Read; + if v.0.len() != N { return Err(ZDeserializeError); } From 2dceb52b4db864d4616fd0ca1d271d2e423752cc Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 9 Apr 2024 17:40:18 +0200 Subject: [PATCH 186/357] Fix tests --- zenoh/src/payload.rs | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index 7e42b4564a..a63d19d4a9 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -19,6 +19,7 @@ use std::{ borrow::Cow, convert::Infallible, fmt::Debug, ops::Deref, string::FromUtf8Error, sync::Arc, }; use unwrap_infallible::UnwrapInfallible; +use zenoh_buffers::ZBufWriter; use zenoh_buffers::{ buffer::{Buffer, SplitBuffer}, reader::HasReader, @@ -148,6 +149,21 @@ impl std::io::Seek for PayloadReader<'_> { } } +/// A writer that implements [`std::io::Write`] trait to write into a [`Payload`]. +#[repr(transparent)] +#[derive(Debug)] +pub struct PayloadWriter<'a>(ZBufWriter<'a>); + +impl std::io::Write for PayloadWriter<'_> { + fn write(&mut self, buf: &[u8]) -> std::io::Result { + std::io::Write::write(&mut self.0, buf) + } + + fn flush(&mut self) -> std::io::Result<()> { + Ok(()) + } +} + /// An iterator that implements [`std::iter::Iterator`] trait to iterate on values `T` in a [`Payload`]. /// Note that [`Payload`] contains a serialized version of `T` and iterating over a [`Payload`] performs lazy deserialization. #[repr(transparent)] @@ -1205,7 +1221,7 @@ mod tests { let v: [usize; 5] = [0, 1, 2, 3, 4]; println!("Serialize:\t{:?}", v); let p = Payload::from_iter(v.iter()); - println!("Deserialize:\t{:?}", p); + println!("Deserialize:\t{:?}\n", p); for (i, t) in p.iter::().enumerate() { assert_eq!(i, t); } @@ -1213,7 +1229,7 @@ mod tests { let mut v = vec![[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]; println!("Serialize:\t{:?}", v); let p = Payload::from_iter(v.drain(..)); - println!("Deserialize:\t{:?}", p); + println!("Deserialize:\t{:?}\n", p); let mut iter = p.iter::<[u8; 4]>(); assert_eq!(iter.next().unwrap(), [0, 1, 2, 3]); assert_eq!(iter.next().unwrap(), [4, 5, 6, 7]); @@ -1226,8 +1242,8 @@ mod tests { hm.insert(0, 0); hm.insert(1, 1); println!("Serialize:\t{:?}", hm); - let p = Payload::from_iter(hm.drain()); - println!("Deserialize:\t{:?}", p); + let p = Payload::from_iter(hm.clone().drain()); + println!("Deserialize:\t{:?}\n", p); let o = HashMap::from_iter(p.iter::<(usize, usize)>()); assert_eq!(hm, o); } From 0294dc953da588ec190364474bc0dce60dcf5363 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 9 Apr 2024 17:41:21 +0200 Subject: [PATCH 187/357] zserde exported --- zenoh/src/api/encoding.rs | 3 +++ zenoh/src/lib.rs | 1 + 2 files changed, 4 insertions(+) diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs index 7518671eed..aba01e01b4 100644 --- a/zenoh/src/api/encoding.rs +++ b/zenoh/src/api/encoding.rs @@ -848,3 +848,6 @@ impl EncodingMapping for Box { impl EncodingMapping for SharedMemoryBuf { const ENCODING: Encoding = Encoding::ZENOH_BYTES; } + +pub struct EncodingBuilder(Encoding); + diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index cb25ab8efc..5d35f35c8a 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -231,6 +231,7 @@ pub mod payload { pub use crate::api::payload::PayloadReader; pub use crate::api::payload::Serialize; pub use 
crate::api::payload::StringOrBase64; + pub use crate::api::payload::ZSerde; } /// [Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries From c2c6217bcb894fe7d5319249c3b46f2f5230d998 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 9 Apr 2024 18:07:06 +0200 Subject: [PATCH 188/357] Add SHM support for deserializer --- Cargo.lock | 1 + examples/Cargo.toml | 7 +++- examples/examples/z_sub.rs | 1 - examples/examples/z_sub_shm.rs | 66 ++++++++++++++++++++++++++++++++++ zenoh/src/payload.rs | 23 ++++++++++++ 5 files changed, 96 insertions(+), 2 deletions(-) create mode 100644 examples/examples/z_sub_shm.rs diff --git a/Cargo.lock b/Cargo.lock index 3f74af9ed1..a9d327a978 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4612,6 +4612,7 @@ dependencies = [ "zenoh", "zenoh-collections", "zenoh-ext", + "zenoh-shm", ] [[package]] diff --git a/examples/Cargo.toml b/examples/Cargo.toml index fc1db17fe8..fb9c4c481d 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -27,7 +27,7 @@ readme = "README.md" publish = false [features] -shared-memory = ["zenoh/shared-memory"] +shared-memory = ["zenoh-shm","zenoh/shared-memory"] unstable = ["zenoh/unstable"] transport_unixpipe = ["zenoh/transport_unixpipe"] @@ -52,6 +52,7 @@ log = { workspace = true } zenoh = { workspace = true } zenoh-collections = { workspace = true } zenoh-ext = { workspace = true } +zenoh-shm = { workspace = true, optional = true } [dev-dependencies] rand = { workspace = true, features = ["default"] } @@ -96,6 +97,10 @@ required-features = ["shared-memory"] name = "z_sub" path = "examples/z_sub.rs" +[[example]] +name = "z_sub_shm" +path = "examples/z_sub_shm.rs" + [[example]] name = "z_pull" path = "examples/z_pull.rs" diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index fbce562c2e..299f0c8f49 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -32,7 +32,6 @@ async fn main() { let session = zenoh::open(config).res().await.unwrap(); println!("Declaring Subscriber on '{}'...", &key_expr); - let subscriber = session.declare_subscriber(&key_expr).res().await.unwrap(); println!("Press CTRL-C to quit..."); diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs new file mode 100644 index 0000000000..630876f287 --- /dev/null +++ b/examples/examples/z_sub_shm.rs @@ -0,0 +1,66 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use clap::Parser; +use zenoh::config::Config; +use zenoh::prelude::r#async::*; +use zenoh_examples::CommonArgs; +use zenoh_shm::SharedMemoryBuf; + +#[tokio::main] +async fn main() { + // Initiate logging + env_logger::init(); + + let (mut config, key_expr) = parse_args(); + + // A probing procedure for shared memory is performed upon session opening. To enable `z_pub_shm` to operate + // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the + // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. 
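    // (Editorial note, not part of the patch.) The call on the next line enables shared
    // memory programmatically; the same effect can be had from the JSON5 configuration
    // file, assuming the usual layout of the config tree:
    //
    //     transport: { shared_memory: { enabled: true } },
    //
    // Both sides must enable it: if the probing fails, the publisher silently falls
    // back to the regular network transport, and this subscriber will then not see
    // `SharedMemoryBuf` payloads.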
+ config.transport.shared_memory.set_enabled(true).unwrap(); + + println!("Opening session..."); + let session = zenoh::open(config).res().await.unwrap(); + + println!("Declaring Subscriber on '{}'...", &key_expr); + let subscriber = session.declare_subscriber(&key_expr).res().await.unwrap(); + + println!("Press CTRL-C to quit..."); + while let Ok(sample) = subscriber.recv_async().await { + match sample.payload().deserialize::() { + Ok(payload) => println!( + ">> [Subscriber] Received {} ('{}': '{:02x?}')", + sample.kind(), + sample.key_expr().as_str(), + payload.as_slice() + ), + Err(e) => { + println!(">> [Subscriber] Not a SharedMemoryBuf: {:?}", e); + } + } + } +} + +#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] +struct SubArgs { + #[arg(short, long, default_value = "demo/example/**")] + /// The Key Expression to subscribe to. + key: KeyExpr<'static>, + #[command(flatten)] + common: CommonArgs, +} + +fn parse_args() -> (Config, KeyExpr<'static>) { + let args = SubArgs::parse(); + (args.common.into(), args.key) +} diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index a63d19d4a9..b05cf868a8 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -1000,6 +1000,29 @@ impl From for Payload { } } +#[cfg(feature = "shared-memory")] +impl Deserialize<'_, SharedMemoryBuf> for ZSerde { + type Error = ZDeserializeError; + + fn deserialize(self, v: &Payload) -> Result { + for zs in v.0.zslices() { + if let Some(shmb) = zs.downcast_ref::() { + return Ok(shmb.clone()); + } + } + Err(ZDeserializeError) + } +} + +#[cfg(feature = "shared-memory")] +impl TryFrom for SharedMemoryBuf { + type Error = ZDeserializeError; + + fn try_from(value: Payload) -> Result { + ZSerde.deserialize(&value) + } +} + // Tuple impl Serialize<(A, B)> for ZSerde where From e4ee3069e4cff58a79e983d2bdb9c357a5975177 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 9 Apr 2024 18:30:30 +0200 Subject: [PATCH 189/357] Fix SharedMemoryBuf deserialize --- zenoh/src/payload.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index b05cf868a8..1cb9fae783 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -1005,7 +1005,9 @@ impl Deserialize<'_, SharedMemoryBuf> for ZSerde { type Error = ZDeserializeError; fn deserialize(self, v: &Payload) -> Result { - for zs in v.0.zslices() { + // A SharedMemoryBuf is expected to have only one slice + let mut zslices = v.0.zslices(); + if let Some(zs) = zslices.next() { if let Some(shmb) = zs.downcast_ref::() { return Ok(shmb.clone()); } From 8e87318dd343c3931ce8df024c48fbcf681cd7be Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 9 Apr 2024 18:30:35 +0200 Subject: [PATCH 190/357] export keyexpr SetIntersectionLevel --- zenoh/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 5d35f35c8a..2d71f81c97 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -174,6 +174,7 @@ pub mod key_expr { pub use crate::api::key_expr::KeyExpr; pub use zenoh_keyexpr::keyexpr; pub use zenoh_keyexpr::OwnedKeyExpr; + pub use zenoh_keyexpr::SetIntersectionLevel; pub use zenoh_macros::{kedefine, keformat, kewrite}; // keyexpr format macro support pub mod format { From 0a38e277ec4c5be9714efe2b277e2e108428ba91 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 9 Apr 2024 18:32:57 +0200 Subject: [PATCH 191/357] liveliness token export --- zenoh/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 
2d71f81c97..247d1d68a0 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -305,6 +305,7 @@ pub mod scouting { pub mod liveliness { pub use crate::api::liveliness::Liveliness; pub use crate::api::liveliness::LivelinessSubscriberBuilder; + pub use crate::api::liveliness::LivelinessToken; } /// Timestamp support From cf861e1ecaa75930488e72b8288027828d1eadb4 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 9 Apr 2024 18:36:10 +0200 Subject: [PATCH 192/357] Fix clippy examples --- examples/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/Cargo.toml b/examples/Cargo.toml index fb9c4c481d..4a4a4fef3e 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -100,6 +100,7 @@ path = "examples/z_sub.rs" [[example]] name = "z_sub_shm" path = "examples/z_sub_shm.rs" +required-features = ["shared-memory"] [[example]] name = "z_pull" From 28e23ab3c2713c2b65e331a7d432c0c2856c63b9 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 9 Apr 2024 19:42:25 +0200 Subject: [PATCH 193/357] Add writer method to payload --- zenoh/src/payload.rs | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index 1cb9fae783..f8af7e182a 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -14,9 +14,9 @@ //! Payload primitives. use crate::buffers::ZBuf; -use std::marker::PhantomData; use std::{ - borrow::Cow, convert::Infallible, fmt::Debug, ops::Deref, string::FromUtf8Error, sync::Arc, + borrow::Cow, convert::Infallible, fmt::Debug, marker::PhantomData, ops::Deref, + string::FromUtf8Error, sync::Arc, }; use unwrap_infallible::UnwrapInfallible; use zenoh_buffers::ZBufWriter; @@ -57,7 +57,7 @@ impl Payload { Self(ZBuf::empty()) } - /// Create a [`Payload`] from any type `T` that can implements [`Into`]. + /// Create a [`Payload`] from any type `T` that implements [`Into`]. pub fn new(t: T) -> Self where T: Into, @@ -80,7 +80,7 @@ impl Payload { PayloadReader(self.0.reader()) } - /// Build a [`Payload`] from a [`Reader`]. This operation copies data from the reader. + /// Build a [`Payload`] from a generic reader implementing [`std::io::Read`]. This operation copies data from the reader. pub fn from_reader(mut reader: R) -> Result where R: std::io::Read, @@ -103,6 +103,11 @@ impl Payload { } } + /// Get a [`PayloadWriter`] implementing [`std::io::Write`] trait. + pub fn writer(&mut self) -> PayloadWriter<'_> { + PayloadWriter(self.0.writer()) + } + /// Encode an object of type `T` as a [`Value`] using the [`ZSerde`]. 
/// /// ```rust @@ -733,7 +738,7 @@ impl Serialize<&serde_json::Value> for ZSerde { fn serialize(self, t: &serde_json::Value) -> Self::Output { let mut payload = Payload::empty(); - serde_json::to_writer(payload.0.writer(), t)?; + serde_json::to_writer(payload.writer(), t)?; Ok(payload) } } From 042964e11e8a6aa423611f669b33e5426bdfd7bc Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 10 Apr 2024 11:49:09 +0200 Subject: [PATCH 194/357] Add ZSlice payload support --- commons/zenoh-buffers/src/zbuf.rs | 41 ++++- commons/zenoh-buffers/src/zslice.rs | 4 + .../zenoh-collections/src/single_or_vec.rs | 3 + zenoh/src/payload.rs | 174 +++++++++++++++--- 4 files changed, 187 insertions(+), 35 deletions(-) diff --git a/commons/zenoh-buffers/src/zbuf.rs b/commons/zenoh-buffers/src/zbuf.rs index 6fded06ae7..cfface650a 100644 --- a/commons/zenoh-buffers/src/zbuf.rs +++ b/commons/zenoh-buffers/src/zbuf.rs @@ -17,7 +17,7 @@ use crate::{ buffer::{Buffer, SplitBuffer}, reader::{BacktrackableReader, DidntRead, DidntSiphon, HasReader, Reader, SiphonableReader}, writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, - ZSlice, + ZSlice, ZSliceBuffer, }; use alloc::{sync::Arc, vec::Vec}; use core::{cmp, iter, mem, num::NonZeroUsize, ops::RangeBounds, ptr}; @@ -60,6 +60,21 @@ impl ZBuf { } } + pub fn to_zslice(&self) -> ZSlice { + let mut slices = self.zslices(); + match self.slices.len() { + 0 => ZSlice::empty(), + // SAFETY: it's safe to use unwrap_unchecked() beacuse we are explicitly checking the length is 1. + 1 => unsafe { slices.next().unwrap_unchecked().clone() }, + _ => slices + .fold(Vec::new(), |mut acc, it| { + acc.extend(it.as_slice()); + acc + }) + .into(), + } + } + pub fn splice>(&mut self, erased: Range, replacement: &[u8]) { let start = match erased.start_bound() { core::ops::Bound::Included(n) => *n, @@ -201,15 +216,31 @@ impl PartialEq for ZBuf { } // From impls +impl From for ZBuf { + fn from(t: ZSlice) -> Self { + let mut zbuf = ZBuf::empty(); + zbuf.push_zslice(t); + zbuf + } +} + +impl From> for ZBuf +where + T: ZSliceBuffer + 'static, +{ + fn from(t: Arc) -> Self { + let zslice: ZSlice = t.into(); + Self::from(zslice) + } +} + impl From for ZBuf where - T: Into, + T: ZSliceBuffer + 'static, { fn from(t: T) -> Self { - let mut zbuf = ZBuf::empty(); let zslice: ZSlice = t.into(); - zbuf.push_zslice(zslice); - zbuf + Self::from(zslice) } } diff --git a/commons/zenoh-buffers/src/zslice.rs b/commons/zenoh-buffers/src/zslice.rs index c15cbc6828..05c77cac7d 100644 --- a/commons/zenoh-buffers/src/zslice.rs +++ b/commons/zenoh-buffers/src/zslice.rs @@ -114,6 +114,10 @@ impl ZSlice { } } + pub fn empty() -> Self { + unsafe { ZSlice::new_unchecked(Arc::new([]), 0, 0) } + } + /// # Safety /// This function does not verify wether the `start` and `end` indexes are within the buffer boundaries. /// If a [`ZSlice`] is built via this constructor, a later access may panic if `start` and `end` indexes are out-of-bound. 
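(Editorial sketch, not part of the patch.) The `ZSlice` plumbing added in this patch — `ZBuf::to_zslice()`, `ZSlice::empty()` and the new `From` conversions in `payload.rs` below — lets whole buffers move in and out of a `Payload` without an intermediate `Vec<u8>`. A minimal round trip, assuming the `zenoh::buffers` and `zenoh::payload` re-export paths used elsewhere in this series:

```rust
use zenoh::buffers::{ZBuf, ZSlice};
use zenoh::payload::Payload;

fn zslice_roundtrip() {
    // Vec<u8> is a ZSliceBuffer, so it converts straight into a ZSlice.
    let slice: ZSlice = vec![1u8, 2, 3, 4].into();
    let payload = Payload::from(slice.clone());

    // `From<&Payload> for ZSlice` goes through `ZBuf::to_zslice()`: a
    // single-slice buffer is handed back as-is, while a multi-slice one
    // is flattened into a single contiguous allocation.
    let back: ZSlice = (&payload).into();
    assert_eq!(back.as_slice(), slice.as_slice());

    // The same payload can also be taken apart as a ZBuf.
    let _zbuf: ZBuf = (&payload).into();
}
```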
diff --git a/commons/zenoh-collections/src/single_or_vec.rs b/commons/zenoh-collections/src/single_or_vec.rs index ceb43e4025..ed82bf49af 100644 --- a/commons/zenoh-collections/src/single_or_vec.rs +++ b/commons/zenoh-collections/src/single_or_vec.rs @@ -182,14 +182,17 @@ impl SingleOrVec { self.vectorize().insert(at, value); } } + enum DrainInner<'a, T> { Vec(alloc::vec::Drain<'a, T>), Single(&'a mut SingleOrVecInner), Done, } + pub struct Drain<'a, T> { inner: DrainInner<'a, T>, } + impl<'a, T> Iterator for Drain<'a, T> { type Item = T; diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index f8af7e182a..1b91757329 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -91,11 +91,11 @@ impl Payload { } /// Get a [`PayloadReader`] implementing [`std::io::Read`] trait. - pub fn iter<'a, T>(&'a self) -> PayloadIterator<'a, T> + pub fn iter(&self) -> PayloadIterator<'_, T> where - T: TryFrom, - ZSerde: Deserialize<'a, T>, - >::Error: Debug, + T: for<'b> TryFrom<&'b Payload>, + for<'b> ZSerde: Deserialize<'b, T>, + for<'b> >::Error: Debug, { PayloadIterator { reader: self.0.reader(), @@ -126,14 +126,23 @@ impl Payload { } /// Decode an object of type `T` from a [`Value`] using the [`ZSerde`]. - /// See [encode](Value::encode) for an example. pub fn deserialize<'a, T>(&'a self) -> ZResult where ZSerde: Deserialize<'a, T>, >::Error: Debug, { - let t: T = ZSerde.deserialize(self).map_err(|e| zerror!("{:?}", e))?; - Ok(t) + ZSerde + .deserialize(self) + .map_err(|e| zerror!("{:?}", e).into()) + } + + /// Decode an object of type `T` from a [`Value`] using the [`ZSerde`]. + pub fn into<'a, T>(&'a self) -> T + where + ZSerde: Deserialize<'a, T, Error = Infallible>, + >::Error: Debug, + { + ZSerde.deserialize(self).unwrap_infallible() } } @@ -181,10 +190,10 @@ where _t: PhantomData, } -impl<'a, T> Iterator for PayloadIterator<'a, T> +impl Iterator for PayloadIterator<'_, T> where - ZSerde: for<'b> Deserialize<'b, T>, - >::Error: Debug, + for<'a> ZSerde: Deserialize<'a, T>, + for<'a> >::Error: Debug, { type Item = T; @@ -278,6 +287,55 @@ impl From<&Payload> for ZBuf { } } +// ZSlice +impl Serialize for ZSerde { + type Output = Payload; + + fn serialize(self, t: ZSlice) -> Self::Output { + Payload::new(t) + } +} + +impl From for Payload { + fn from(t: ZSlice) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&ZSlice> for ZSerde { + type Output = Payload; + + fn serialize(self, t: &ZSlice) -> Self::Output { + Payload::new(t.clone()) + } +} + +impl From<&ZSlice> for Payload { + fn from(t: &ZSlice) -> Self { + ZSerde.serialize(t) + } +} + +impl Deserialize<'_, ZSlice> for ZSerde { + type Error = Infallible; + + fn deserialize(self, v: &Payload) -> Result { + Ok(v.0.to_zslice()) + } +} + +impl From for ZSlice { + fn from(value: Payload) -> Self { + ZBuf::from(value).to_zslice() + } +} + +impl From<&Payload> for ZSlice { + fn from(value: &Payload) -> Self { + ZSerde.deserialize(value).unwrap_infallible() + } +} + // [u8; N] impl Serialize<[u8; N]> for ZSerde { type Output = Payload; @@ -515,7 +573,6 @@ impl From<&str> for Payload { } } -// Cow impl<'a> Serialize> for ZSerde { type Output = Payload; @@ -1069,16 +1126,16 @@ where } } -impl<'a, A, B> Deserialize<'a, (A, B)> for ZSerde +impl Deserialize<'_, (A, B)> for ZSerde where - A: TryFrom, - >::Error: Debug, - B: TryFrom, - >::Error: Debug, + for<'a> A: TryFrom<&'a Payload>, + for<'a> >::Error: Debug, + for<'b> B: TryFrom<&'b Payload>, + for<'b> >::Error: Debug, { type Error = ZError; - fn deserialize(self, payload: &'a Payload) -> 
Result<(A, B), Self::Error> { + fn deserialize(self, payload: &Payload) -> Result<(A, B), Self::Error> { let codec = Zenoh080::new(); let mut reader = payload.0.reader(); @@ -1088,18 +1145,18 @@ where let bbuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; let bpld = Payload::new(bbuf); - let a = A::try_from(apld).map_err(|e| zerror!("{:?}", e))?; - let b = B::try_from(bpld).map_err(|e| zerror!("{:?}", e))?; + let a = A::try_from(&apld).map_err(|e| zerror!("{:?}", e))?; + let b = B::try_from(&bpld).map_err(|e| zerror!("{:?}", e))?; Ok((a, b)) } } impl TryFrom for (A, B) where - A: TryFrom, - >::Error: Debug, - B: TryFrom, - >::Error: Debug, + A: for<'a> TryFrom<&'a Payload>, + for<'a> >::Error: Debug, + for<'b> B: TryFrom<&'b Payload>, + for<'b> >::Error: Debug, { type Error = ZError; @@ -1108,6 +1165,20 @@ where } } +impl TryFrom<&Payload> for (A, B) +where + for<'a> A: TryFrom<&'a Payload>, + for<'a> >::Error: Debug, + for<'b> B: TryFrom<&'b Payload>, + for<'b> >::Error: Debug, +{ + type Error = ZError; + + fn try_from(value: &Payload) -> Result { + ZSerde.deserialize(value) + } +} + // For convenience to always convert a Value in the examples #[derive(Debug, Clone, PartialEq, Eq)] pub enum StringOrBase64 { @@ -1142,12 +1213,9 @@ impl std::fmt::Display for StringOrBase64 { impl From<&Payload> for StringOrBase64 { fn from(v: &Payload) -> Self { use base64::{engine::general_purpose::STANDARD as b64_std_engine, Engine}; - match v.deserialize::>() { - Ok(s) => StringOrBase64::String(s.into_owned()), - Err(_) => { - let cow: Cow<'_, [u8]> = Cow::from(v); - StringOrBase64::Base64(b64_std_engine.encode(cow)) - } + match v.deserialize::() { + Ok(s) => StringOrBase64::String(s), + Err(_) => StringOrBase64::Base64(b64_std_engine.encode(v.into::>())), } } } @@ -1157,7 +1225,7 @@ mod tests { fn serializer() { use super::Payload; use rand::Rng; - use zenoh_buffers::ZBuf; + use zenoh_buffers::{ZBuf, ZSlice}; const NUM: usize = 1_000; @@ -1276,5 +1344,51 @@ mod tests { println!("Deserialize:\t{:?}\n", p); let o = HashMap::from_iter(p.iter::<(usize, usize)>()); assert_eq!(hm, o); + + let mut hm: HashMap> = HashMap::new(); + hm.insert(0, vec![0u8; 8]); + hm.insert(1, vec![1u8; 16]); + println!("Serialize:\t{:?}", hm); + let p = Payload::from_iter(hm.clone().drain()); + println!("Deserialize:\t{:?}\n", p); + let o = HashMap::from_iter(p.iter::<(usize, Vec)>()); + assert_eq!(hm, o); + + let mut hm: HashMap> = HashMap::new(); + hm.insert(0, vec![0u8; 8]); + hm.insert(1, vec![1u8; 16]); + println!("Serialize:\t{:?}", hm); + let p = Payload::from_iter(hm.clone().drain()); + println!("Deserialize:\t{:?}\n", p); + let o = HashMap::from_iter(p.iter::<(usize, Vec)>()); + assert_eq!(hm, o); + + let mut hm: HashMap = HashMap::new(); + hm.insert(0, ZSlice::from(vec![0u8; 8])); + hm.insert(1, ZSlice::from(vec![1u8; 16])); + println!("Serialize:\t{:?}", hm); + let p = Payload::from_iter(hm.clone().drain()); + println!("Deserialize:\t{:?}\n", p); + let o = HashMap::from_iter(p.iter::<(usize, ZSlice)>()); + assert_eq!(hm, o); + + let mut hm: HashMap = HashMap::new(); + hm.insert(0, ZBuf::from(vec![0u8; 8])); + hm.insert(1, ZBuf::from(vec![1u8; 16])); + println!("Serialize:\t{:?}", hm); + let p = Payload::from_iter(hm.clone().drain()); + println!("Deserialize:\t{:?}\n", p); + let o = HashMap::from_iter(p.iter::<(usize, ZBuf)>()); + assert_eq!(hm, o); + + use std::borrow::Cow; + let mut hm: HashMap> = HashMap::new(); + hm.insert(0, vec![0u8; 8]); + hm.insert(1, vec![1u8; 16]); + 
println!("Serialize:\t{:?}", hm); + let p = Payload::from_iter(hm.clone().iter().map(|(k, v)| (k, Cow::from(v)))); + println!("Deserialize:\t{:?}\n", p); + let o = HashMap::from_iter(p.iter::<(usize, Vec)>()); + assert_eq!(hm, o); } } From abded105583f165d939ac9b24174e6a65b11abbb Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 10 Apr 2024 11:58:42 +0200 Subject: [PATCH 195/357] Improve payload --- zenoh/src/payload.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index 1b91757329..aed0d15834 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -1225,6 +1225,7 @@ mod tests { fn serializer() { use super::Payload; use rand::Rng; + use std::borrow::Cow; use zenoh_buffers::{ZBuf, ZSlice}; const NUM: usize = 1_000; @@ -1302,10 +1303,21 @@ mod tests { serialize_deserialize!(String, ""); serialize_deserialize!(String, String::from("abcdefghijklmnopqrstuvwxyz")); + // Cow + serialize_deserialize!(Cow, Cow::from("")); + serialize_deserialize!( + Cow, + Cow::from(String::from("abcdefghijklmnopqrstuvwxyz")) + ); + // Vec serialize_deserialize!(Vec, vec![0u8; 0]); serialize_deserialize!(Vec, vec![0u8; 64]); + // Cow<[u8]> + serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 0])); + serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 64])); + // ZBuf serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 0])); serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 64])); @@ -1381,7 +1393,6 @@ mod tests { let o = HashMap::from_iter(p.iter::<(usize, ZBuf)>()); assert_eq!(hm, o); - use std::borrow::Cow; let mut hm: HashMap> = HashMap::new(); hm.insert(0, vec![0u8; 8]); hm.insert(1, vec![1u8; 16]); From adf422d89945f1958ff2460f0816c684fa2cfe37 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 10 Apr 2024 12:30:41 +0200 Subject: [PATCH 196/357] allowed build zenoh without unstable feature set (#910) --- zenoh/src/publication.rs | 1 + zenoh/src/query.rs | 2 ++ zenoh/src/queryable.rs | 12 ++++++++++-- zenoh/src/sample/builder.rs | 1 + zenoh/src/sample/mod.rs | 8 ++++---- zenoh/src/session.rs | 6 +++++- zenoh/src/subscriber.rs | 3 --- 7 files changed, 23 insertions(+), 10 deletions(-) diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index c176ad32e0..4f31c73a24 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -157,6 +157,7 @@ impl
ValueBuilderTrait for PublicationBuilder { } } +#[zenoh_macros::unstable] impl SampleBuilderTrait for PublicationBuilder { #[cfg(feature = "unstable")] fn source_info(self, source_info: SourceInfo) -> Self { diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index cb1116130d..3a380bd1c9 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -133,6 +133,7 @@ pub struct GetBuilder<'a, 'b, Handler> { pub(crate) source_info: SourceInfo, } +#[zenoh_macros::unstable] impl SampleBuilderTrait for GetBuilder<'_, '_, Handler> { #[cfg(feature = "unstable")] fn source_info(self, source_info: SourceInfo) -> Self { @@ -430,6 +431,7 @@ where self.value, #[cfg(feature = "unstable")] self.attachment, + #[cfg(feature = "unstable")] self.source_info, callback, ) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 6fbb4e9090..0ad3a36c07 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -18,12 +18,15 @@ use crate::encoding::Encoding; use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; -use crate::sample::{QoSBuilder, SourceInfo}; +use crate::sample::builder::SampleBuilder; +use crate::sample::QoSBuilder; +#[cfg(feature = "unstable")] +use crate::sample::SourceInfo; use crate::Id; use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] -use crate::{query::ReplyKeyExpr, sample::builder::SampleBuilder, sample::Attachment}; +use crate::{query::ReplyKeyExpr, sample::Attachment}; use std::fmt; use std::future::Ready; use std::ops::Deref; @@ -155,7 +158,9 @@ impl Query { encoding: Encoding::default(), }, timestamp: None, + #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] attachment: None, } } @@ -193,7 +198,9 @@ impl Query { qos: response::ext::QoSType::RESPONSE.into(), kind: ReplyBuilderDelete, timestamp: None, + #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] attachment: None, } } @@ -298,6 +305,7 @@ impl TimestampBuilderTrait for ReplyBuilder<'_, '_, T> { } } +#[cfg(feature = "unstable")] impl SampleBuilderTrait for ReplyBuilder<'_, '_, T> { #[cfg(feature = "unstable")] fn attachment>>(self, attachment: U) -> Self { diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index fca55edd09..bad35024ef 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -163,6 +163,7 @@ impl TimestampBuilderTrait for SampleBuilder { } } +#[cfg(feature = "unstable")] impl SampleBuilderTrait for SampleBuilder { #[zenoh_macros::unstable] fn source_info(self, source_info: SourceInfo) -> Self { diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/sample/mod.rs index 6e457578a3..0ef8462d2a 100644 --- a/zenoh/src/sample/mod.rs +++ b/zenoh/src/sample/mod.rs @@ -22,9 +22,9 @@ use crate::Priority; #[zenoh_macros::unstable] use serde::Serialize; use std::{convert::TryFrom, fmt}; +use zenoh_protocol::core::CongestionControl; use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::network::declare::ext::QoSType; -use zenoh_protocol::{core::CongestionControl, zenoh}; pub mod builder; @@ -178,12 +178,12 @@ impl SourceInfo { } #[zenoh_macros::unstable] -impl From for Option { - fn from(source_info: SourceInfo) -> Option { +impl From for Option { + fn from(source_info: SourceInfo) -> Option { if source_info.is_empty() { None } else { - Some(zenoh::put::ext::SourceInfoType { + Some(zenoh_protocol::zenoh::put::ext::SourceInfoType { id: source_info.source_id.unwrap_or_default(), sn: 
source_info.source_sn.unwrap_or_default() as u32, }) diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index f694eb6420..181976dcb0 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -61,6 +61,8 @@ use zenoh_config::unwrap_or_default; use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; #[cfg(feature = "unstable")] use zenoh_protocol::network::declare::SubscriberId; +#[cfg(feature = "unstable")] +use zenoh_protocol::network::ext; use zenoh_protocol::network::AtomicRequestId; use zenoh_protocol::network::RequestId; use zenoh_protocol::zenoh::reply::ReplyBody; @@ -77,7 +79,6 @@ use zenoh_protocol::{ subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, DeclareMode, DeclareQueryable, DeclareSubscriber, UndeclareQueryable, UndeclareSubscriber, }, - ext, request::{self, ext::TargetType, Request}, Mapping, Push, Response, ResponseFinal, }, @@ -1687,7 +1688,10 @@ impl Session { payload: RequestBody::Query(zenoh_protocol::zenoh::Query { consolidation, parameters: selector.parameters().to_string(), + #[cfg(feature = "unstable")] ext_sinfo: source.into(), + #[cfg(not(feature = "unstable"))] + ext_sinfo: None, ext_body: value.as_ref().map(|v| query::ext::QueryBodyType { #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index 60a31a6577..47d41ebb1f 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -202,9 +202,6 @@ pub struct SubscriberBuilder<'a, 'b, Handler> { #[cfg(not(feature = "unstable"))] pub(crate) reliability: Reliability, - #[cfg(not(feature = "unstable"))] - pub(crate) mode: Mode, - #[cfg(feature = "unstable")] pub origin: Locality, #[cfg(not(feature = "unstable"))] From 7d9d57c2b2ef023a0c5887efb092250f2ff2ef44 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 10 Apr 2024 13:03:54 +0200 Subject: [PATCH 197/357] Fix tests --- zenoh/src/payload.rs | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index aed0d15834..eac4f58e7c 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -14,6 +14,7 @@ //! Payload primitives. use crate::buffers::ZBuf; +use std::str::Utf8Error; use std::{ borrow::Cow, convert::Infallible, fmt::Debug, marker::PhantomData, ops::Deref, string::FromUtf8Error, sync::Arc, @@ -494,7 +495,7 @@ impl<'a> Deserialize<'a, Cow<'a, [u8]>> for ZSerde { type Error = Infallible; fn deserialize(self, v: &'a Payload) -> Result, Self::Error> { - Ok(Cow::from(v)) + Ok(v.0.contiguous()) } } @@ -602,16 +603,19 @@ impl From<&Cow<'_, str>> for Payload { } impl<'a> Deserialize<'a, Cow<'a, str>> for ZSerde { - type Error = FromUtf8Error; + type Error = Utf8Error; - fn deserialize(self, v: &Payload) -> Result, Self::Error> { - let v: String = Self.deserialize(v)?; - Ok(Cow::Owned(v)) + fn deserialize(self, v: &'a Payload) -> Result, Self::Error> { + let v: Cow<[u8]> = Self.deserialize(v).unwrap_infallible(); + let _ = core::str::from_utf8(v.as_ref())?; + // SAFETY: &str is &[u8] with the guarantee that every char is UTF-8 + // As implemented internally https://doc.rust-lang.org/std/str/fn.from_utf8_unchecked.html. 
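        // (Editorial comment, not part of the patch.) The transmute on the next line
        // converts `Cow<'a, [u8]>` into `Cow<'a, str>` while preserving the lifetime:
        // the borrowed arm becomes `&str` and the owned arm `String`. The bytes were
        // validated just above with `core::str::from_utf8`, so the UTF-8 invariant
        // holds; soundness otherwise relies on the two `Cow` instantiations sharing
        // the same layout.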
+ Ok(unsafe { core::mem::transmute(v) }) } } impl<'a> TryFrom<&'a Payload> for Cow<'a, str> { - type Error = FromUtf8Error; + type Error = Utf8Error; fn try_from(value: &'a Payload) -> Result { ZSerde.deserialize(value) @@ -1301,14 +1305,11 @@ mod tests { // String serialize_deserialize!(String, ""); - serialize_deserialize!(String, String::from("abcdefghijklmnopqrstuvwxyz")); + serialize_deserialize!(String, String::from("abcdef")); // Cow serialize_deserialize!(Cow, Cow::from("")); - serialize_deserialize!( - Cow, - Cow::from(String::from("abcdefghijklmnopqrstuvwxyz")) - ); + serialize_deserialize!(Cow, Cow::from(String::from("abcdef"))); // Vec serialize_deserialize!(Vec, vec![0u8; 0]); From 27063b6fd2f15be36aa3988c37cf1cbb46933c40 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 10 Apr 2024 15:10:17 +0200 Subject: [PATCH 198/357] Integrating #918 --- commons/zenoh-buffers/src/lib.rs | 12 ++ commons/zenoh-buffers/src/zbuf.rs | 213 +++++++++++++++--------------- 2 files changed, 117 insertions(+), 108 deletions(-) diff --git a/commons/zenoh-buffers/src/lib.rs b/commons/zenoh-buffers/src/lib.rs index eae7f1715c..117fb412b7 100644 --- a/commons/zenoh-buffers/src/lib.rs +++ b/commons/zenoh-buffers/src/lib.rs @@ -199,6 +199,18 @@ pub mod reader { fn rewind(&mut self, mark: Self::Mark) -> bool; } + pub trait AdvanceableReader: Reader { + fn skip(&mut self, offset: usize) -> Result<(), DidntRead>; + fn backtrack(&mut self, offset: usize) -> Result<(), DidntRead>; + fn advance(&mut self, offset: isize) -> Result<(), DidntRead> { + if offset > 0 { + self.skip(offset as usize) + } else { + self.backtrack((-offset) as usize) + } + } + } + #[derive(Debug, Clone, Copy)] pub struct DidntSiphon; diff --git a/commons/zenoh-buffers/src/zbuf.rs b/commons/zenoh-buffers/src/zbuf.rs index cfface650a..f3621049b0 100644 --- a/commons/zenoh-buffers/src/zbuf.rs +++ b/commons/zenoh-buffers/src/zbuf.rs @@ -15,7 +15,10 @@ use crate::ZSliceKind; use crate::{ buffer::{Buffer, SplitBuffer}, - reader::{BacktrackableReader, DidntRead, DidntSiphon, HasReader, Reader, SiphonableReader}, + reader::{ + AdvanceableReader, BacktrackableReader, DidntRead, DidntSiphon, HasReader, Reader, + SiphonableReader, + }, writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, ZSlice, ZSliceBuffer, }; @@ -433,43 +436,74 @@ impl<'a> io::Read for ZBufReader<'a> { } } -#[cfg(feature = "std")] -impl<'a> io::Seek for ZBufReader<'a> { - fn seek(&mut self, pos: io::SeekFrom) -> io::Result { - // Compute the index - let len = self.inner.len(); - let index = match pos { - io::SeekFrom::Start(pos) => pos.try_into().unwrap_or(i64::MAX), - io::SeekFrom::End(pos) => { - pos + i64::try_from(len) - .map_err(|e| io::Error::new(io::ErrorKind::UnexpectedEof, e))? - } - io::SeekFrom::Current(pos) => { - pos + i64::try_from(len - self.remaining()) - .map_err(|e| io::Error::new(io::ErrorKind::UnexpectedEof, e))? 
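(Editorial sketch, not part of the patch.) The hunk continuing below replaces the hand-rolled `seek` with an `AdvanceableReader` abstraction: `skip` walks the cursor forward across slice boundaries, `backtrack` walks it back, and `advance` dispatches on the sign of a relative offset, with `Seek` then expressed on top of it. A usage sketch against the `zenoh-buffers` API as it stands after this patch:

```rust
use zenoh_buffers::reader::{AdvanceableReader, HasReader, Reader};
use zenoh_buffers::ZBuf;

fn advance_demo() {
    let mut buf = ZBuf::empty();
    buf.push_zslice(vec![0u8, 1, 2, 3].into());
    buf.push_zslice(vec![4u8, 5, 6, 7].into());

    let mut reader = buf.reader();
    assert_eq!(reader.read_u8().unwrap(), 0);

    reader.advance(3).unwrap(); // forward, crossing into the second slice
    assert_eq!(reader.read_u8().unwrap(), 4);

    reader.advance(-2).unwrap(); // backwards across the slice boundary
    assert_eq!(reader.read_u8().unwrap(), 3);
}
```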
+impl<'a> AdvanceableReader for ZBufReader<'a> { + fn skip(&mut self, offset: usize) -> Result<(), DidntRead> { + let mut remaining_offset = offset; + while remaining_offset > 0 { + let s = self.inner.slices.get(self.cursor.slice).ok_or(DidntRead)?; + let remains_in_current_slice = s.len() - self.cursor.byte; + let advance = remaining_offset.min(remains_in_current_slice); + remaining_offset -= advance; + self.cursor.byte += advance; + if self.cursor.byte == s.len() { + self.cursor.slice += 1; + self.cursor.byte = 0; } - }; + } + Ok(()) + } - let index = usize::try_from(index) - .map_err(|e| io::Error::new(io::ErrorKind::UnexpectedEof, e))? - .min(len); - - // Seek the position - let mut left = index; - let mut pos = ZBufPos { slice: 0, byte: 0 }; - while let Some(slice) = self.inner.slices.get(pos.slice) { - let len = slice.len(); - if len >= left { - pos.byte = left; - self.cursor = pos; - return Ok(index as u64); - } else { - left -= len; + fn backtrack(&mut self, offset: usize) -> Result<(), DidntRead> { + let mut remaining_offset = offset; + while remaining_offset > 0 { + let backtrack = remaining_offset.min(self.cursor.byte); + remaining_offset -= backtrack; + self.cursor.byte -= backtrack; + if self.cursor.byte == 0 { + if self.cursor.slice == 0 { + break; + } + self.cursor.slice -= 1; + self.cursor.byte = self + .inner + .slices + .get(self.cursor.slice) + .ok_or(DidntRead)? + .len(); } - pos.slice += 1; } + if remaining_offset == 0 { + Ok(()) + } else { + Err(DidntRead) + } + } +} - Err(io::ErrorKind::UnexpectedEof.into()) +#[cfg(feature = "std")] +impl<'a> io::Seek for ZBufReader<'a> { + fn seek(&mut self, pos: io::SeekFrom) -> io::Result { + let current_pos = self + .inner + .slices() + .take(self.cursor.slice) + .fold(0, |acc, s| acc + s.len()) + + self.cursor.byte; + let current_pos = i64::try_from(current_pos) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, format!("{}", e)))?; + + let offset = match pos { + std::io::SeekFrom::Start(s) => i64::try_from(s).unwrap_or(i64::MAX) - current_pos, + std::io::SeekFrom::Current(s) => s, + std::io::SeekFrom::End(s) => self.inner.len() as i64 + s - current_pos, + }; + match self.advance(offset as isize) { + Ok(()) => Ok((offset + current_pos) as u64), + Err(_) => Err(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + "InvalidInput", + )), + } } } @@ -745,80 +779,43 @@ mod tests { #[cfg(feature = "std")] #[test] fn zbuf_seek() { - use crate::reader::HasReader; - use std::io::{Seek, SeekFrom}; - - use super::{ZBuf, ZSlice}; - - let slice: ZSlice = [0u8, 1, 2, 3, 4, 5, 6, 7].to_vec().into(); - - let mut zbuf = ZBuf::empty(); - zbuf.push_zslice(slice.subslice(0, 1).unwrap()); - zbuf.push_zslice(slice.subslice(1, 4).unwrap()); - zbuf.push_zslice(slice.subslice(4, 8).unwrap()); - - let mut reader = zbuf.reader(); - - let index = reader.seek(SeekFrom::Start(0)).unwrap(); - assert_eq!(index, 0); - assert_eq!(index, reader.stream_position().unwrap()); - - let index = reader.seek(SeekFrom::Start(4)).unwrap(); - assert_eq!(index, 4); - assert_eq!(index, reader.stream_position().unwrap()); - - let index = reader.seek(SeekFrom::Start(8)).unwrap(); - assert_eq!(index, 8); - assert_eq!(index, reader.stream_position().unwrap()); - - let index = reader.seek(SeekFrom::Start(u64::MAX)).unwrap(); - assert_eq!(index, 8); - assert_eq!(index, reader.stream_position().unwrap()); - - let index = reader.seek(SeekFrom::End(0)).unwrap(); - assert_eq!(index, 8); - assert_eq!(index, reader.stream_position().unwrap()); - - let index = 
reader.seek(SeekFrom::End(-4)).unwrap(); - assert_eq!(index, 4); - assert_eq!(index, reader.stream_position().unwrap()); - - let index = reader.seek(SeekFrom::End(-8)).unwrap(); - assert_eq!(index, 0); - assert_eq!(index, reader.stream_position().unwrap()); - - reader.seek(SeekFrom::End(i64::MIN)).unwrap_err(); - assert_eq!(index, reader.stream_position().unwrap()); - - let index = reader.seek(SeekFrom::Start(0)).unwrap(); - assert_eq!(index, 0); - assert_eq!(index, reader.stream_position().unwrap()); - - reader.seek(SeekFrom::Current(-1)).unwrap_err(); - assert_eq!(index, reader.stream_position().unwrap()); - - let index = reader.seek(SeekFrom::Current(2)).unwrap(); - assert_eq!(index, 2); - assert_eq!(index, reader.stream_position().unwrap()); - - let index = reader.seek(SeekFrom::Current(2)).unwrap(); - assert_eq!(index, 4); - assert_eq!(index, reader.stream_position().unwrap()); - - let index = reader.seek(SeekFrom::Current(-2)).unwrap(); - assert_eq!(index, 2); - assert_eq!(index, reader.stream_position().unwrap()); - - let index = reader.seek(SeekFrom::Current(-2)).unwrap(); - assert_eq!(index, 0); - assert_eq!(index, reader.stream_position().unwrap()); - - let index = reader.seek(SeekFrom::Current(i64::MAX)).unwrap(); - assert_eq!(index, 8); - assert_eq!(index, reader.stream_position().unwrap()); - - let index = reader.seek(SeekFrom::Current(-1)).unwrap(); - assert_eq!(index, 7); - assert_eq!(index, reader.stream_position().unwrap()); + use super::{HasReader, ZBuf}; + use crate::reader::Reader; + use std::io::Seek; + + let mut buf = ZBuf::empty(); + buf.push_zslice([0u8, 1u8, 2u8, 3u8].into()); + buf.push_zslice([4u8, 5u8, 6u8, 7u8, 8u8].into()); + buf.push_zslice([9u8, 10u8, 11u8, 12u8, 13u8, 14u8].into()); + let mut reader = buf.reader(); + + assert_eq!(reader.stream_position().unwrap(), 0); + assert_eq!(reader.read_u8().unwrap(), 0); + assert_eq!(reader.seek(std::io::SeekFrom::Current(6)).unwrap(), 7); + assert_eq!(reader.read_u8().unwrap(), 7); + assert_eq!(reader.seek(std::io::SeekFrom::Current(-5)).unwrap(), 3); + assert_eq!(reader.read_u8().unwrap(), 3); + assert_eq!(reader.seek(std::io::SeekFrom::Current(10)).unwrap(), 14); + assert_eq!(reader.read_u8().unwrap(), 14); + reader.seek(std::io::SeekFrom::Current(100)).unwrap_err(); + + assert_eq!(reader.seek(std::io::SeekFrom::Start(0)).unwrap(), 0); + assert_eq!(reader.read_u8().unwrap(), 0); + assert_eq!(reader.seek(std::io::SeekFrom::Start(12)).unwrap(), 12); + assert_eq!(reader.read_u8().unwrap(), 12); + assert_eq!(reader.seek(std::io::SeekFrom::Start(15)).unwrap(), 15); + reader.read_u8().unwrap_err(); + reader.seek(std::io::SeekFrom::Start(100)).unwrap_err(); + + assert_eq!(reader.seek(std::io::SeekFrom::End(0)).unwrap(), 15); + reader.read_u8().unwrap_err(); + assert_eq!(reader.seek(std::io::SeekFrom::End(-5)).unwrap(), 10); + assert_eq!(reader.read_u8().unwrap(), 10); + assert_eq!(reader.seek(std::io::SeekFrom::End(-15)).unwrap(), 0); + assert_eq!(reader.read_u8().unwrap(), 0); + reader.seek(std::io::SeekFrom::End(-20)).unwrap_err(); + + assert_eq!(reader.seek(std::io::SeekFrom::Start(10)).unwrap(), 10); + reader.seek(std::io::SeekFrom::Current(-100)).unwrap_err(); } } From a1c2a024e6343222eb110595ee166804b24d0397 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 10 Apr 2024 15:12:25 +0200 Subject: [PATCH 199/357] Fix ZBur io::Read impl --- commons/zenoh-buffers/src/zbuf.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/commons/zenoh-buffers/src/zbuf.rs b/commons/zenoh-buffers/src/zbuf.rs 
index f3621049b0..4a655ce36a 100644 --- a/commons/zenoh-buffers/src/zbuf.rs +++ b/commons/zenoh-buffers/src/zbuf.rs @@ -428,10 +428,7 @@ impl<'a> io::Read for ZBufReader<'a> { fn read(&mut self, buf: &mut [u8]) -> io::Result { match ::read(self, buf) { Ok(n) => Ok(n.get()), - Err(_) => Err(io::Error::new( - io::ErrorKind::UnexpectedEof, - "UnexpectedEof", - )), + Err(_) => Ok(0), } } } From 834be851a79b4787ad4ad3639c28dd86e66c8c12 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 10 Apr 2024 18:00:39 +0200 Subject: [PATCH 200/357] compilation fixes --- zenoh-ext/src/publication_cache.rs | 4 +- zenoh/src/net/tests/tables.rs | 69 ------------------------------ 2 files changed, 2 insertions(+), 71 deletions(-) diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 608a051d05..9f2b645da9 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -181,9 +181,9 @@ impl<'a> PublicationCache<'a> { sample = sub_recv.recv_async() => { if let Ok(sample) = sample { let queryable_key_expr: KeyExpr<'_> = if let Some(prefix) = &queryable_prefix { - prefix.join(&sample.key_expr).unwrap().into() + prefix.join(&sample.key_expr()).unwrap().into() } else { - sample.key_expr.clone() + sample.key_expr().clone() }; if let Some(queue) = cache.get_mut(queryable_key_expr.as_keyexpr()) { diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index f5a4b24e2a..35db2a7ac4 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -234,75 +234,6 @@ fn multisub_test() { tables::close_face(&tables, &face0); } -#[test] -fn multisub_test() { - let config = Config::default(); - let router = Router::new( - ZenohId::try_from([1]).unwrap(), - WhatAmI::Client, - Some(Arc::new(HLC::default())), - &config, - ) - .unwrap(); - let tables = router.tables.clone(); - - let primitives = Arc::new(DummyPrimitives {}); - let face0 = Arc::downgrade(&router.new_primitives(primitives).state); - assert!(face0.upgrade().is_some()); - - // -------------- - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, - }; - declare_subscription( - zlock!(tables.ctrl_lock).as_ref(), - &tables, - &mut face0.upgrade().unwrap(), - 0, - &"sub".into(), - &sub_info, - NodeId::default(), - ); - let optres = Resource::get_resource(zread!(tables.tables)._get_root(), "sub") - .map(|res| Arc::downgrade(&res)); - assert!(optres.is_some()); - let res = optres.unwrap(); - assert!(res.upgrade().is_some()); - - declare_subscription( - zlock!(tables.ctrl_lock).as_ref(), - &tables, - &mut face0.upgrade().unwrap(), - 1, - &"sub".into(), - &sub_info, - NodeId::default(), - ); - assert!(res.upgrade().is_some()); - - undeclare_subscription( - zlock!(tables.ctrl_lock).as_ref(), - &tables, - &mut face0.upgrade().unwrap(), - 0, - &WireExpr::empty(), - NodeId::default(), - ); - assert!(res.upgrade().is_some()); - - undeclare_subscription( - zlock!(tables.ctrl_lock).as_ref(), - &tables, - &mut face0.upgrade().unwrap(), - 1, - &WireExpr::empty(), - NodeId::default(), - ); - assert!(res.upgrade().is_none()); - - tables::close_face(&tables, &face0); -} - #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn clean_test() { let config = Config::default(); From 9888b6b4685b7f6a9f3667ed9b5b8acf7c7dd488 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 11 Apr 2024 12:41:25 +0200 Subject: [PATCH 201/357] compilation fixes --- zenoh-ext/src/publication_cache.rs | 4 ++-- zenoh/src/api/queryable.rs | 11 ----------- zenoh/src/api/sample.rs 
| 6 ------ zenoh/src/api/scouting.rs | 1 - zenoh/src/api/session.rs | 10 ++-------- zenoh/src/lib.rs | 3 +++ zenoh/src/net/routing/hat/linkstate_peer/network.rs | 2 +- zenoh/src/net/routing/hat/p2p_peer/gossip.rs | 2 +- zenoh/tests/session.rs | 6 +++--- 9 files changed, 12 insertions(+), 33 deletions(-) diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 7687593b7d..8b9cc261df 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -17,7 +17,7 @@ use std::future::Ready; use std::time::Duration; use zenoh::core::Error; use zenoh::core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; -use zenoh::internal::ResolveFuture; +use zenoh::internal::{ResolveFuture, TerminatableTask}; use zenoh::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; use zenoh::queryable::{Query, Queryable}; use zenoh::runtime::ZRuntime; @@ -173,7 +173,7 @@ impl<'a> PublicationCache<'a> { let token = TerminatableTask::create_cancellation_token(); let token2 = token.clone(); let task = TerminatableTask::spawn( - zenoh_runtime::ZRuntime::Application, + ZRuntime::Application, async move { let mut cache: HashMap> = HashMap::with_capacity(resources_limit.unwrap_or(32)); diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 3607a2aa0a..8cd6292e3d 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -27,17 +27,6 @@ use super::{ }; use crate::net::primitives::Primitives; use std::{fmt, future::Ready, ops::Deref, sync::Arc}; -use crate::prelude::*; -use crate::sample::builder::SampleBuilder; -use crate::sample::QoSBuilder; -#[cfg(feature = "unstable")] -use crate::sample::SourceInfo; -use crate::Id; -use crate::SessionRef; -use crate::Undeclarable; -#[cfg(feature = "unstable")] -use crate::{query::ReplyKeyExpr, sample::Attachment}; -use std::fmt; use uhlc::Timestamp; use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; use zenoh_protocol::{ diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 46ecf7cc16..f2ff96fb04 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -30,12 +30,6 @@ use zenoh_protocol::{ pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; #[zenoh_macros::unstable] use serde::Serialize; -use std::{convert::TryFrom, fmt}; -use zenoh_protocol::core::CongestionControl; -use zenoh_protocol::core::EntityGlobalId; -use zenoh_protocol::network::declare::ext::QoSType; - -pub mod builder; pub type SourceSn = u64; diff --git a/zenoh/src/api/scouting.rs b/zenoh/src/api/scouting.rs index 3e213539c3..058ab82058 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -13,7 +13,6 @@ // use crate::api::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::net::runtime::{orchestrator::Loop, Runtime}; -use futures::StreamExt; use std::time::Duration; use std::{fmt, future::Ready, net::SocketAddr, ops::Deref}; use tokio::net::UdpSocket; diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 5dce02d9e6..3f2bcb07f3 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -51,8 +51,6 @@ use zenoh_config::{unwrap_or_default, Config, Notifier}; use zenoh_core::{ zconfigurable, zread, Resolvable, Resolve, ResolveClosure, ResolveFuture, SyncResolve, }; -use zenoh_config::unwrap_or_default; -use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; #[cfg(feature = "unstable")] use zenoh_protocol::network::{declare::SubscriberId, ext}; use zenoh_protocol::{ @@ -80,16 
+78,12 @@ use zenoh_task::TaskController; use zenoh_util::core::AsyncResolve; #[cfg(feature = "unstable")] -use { - super::{ +use super::{ liveliness::{Liveliness, LivelinessTokenState}, publication::Publisher, publication::{MatchingListenerState, MatchingStatus}, sample::{Attachment, SourceInfo}, - }, - zenoh_protocol::network::declare::SubscriberId, - zenoh_protocol::network::ext, -}; + }; zconfigurable! { pub(crate) static ref API_DATA_RECEPTION_CHANNEL_SIZE: usize = 256; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 247d1d68a0..c0840c8829 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -223,6 +223,7 @@ pub mod value { /// Encoding support pub mod encoding { pub use crate::api::encoding::Encoding; + pub use crate::api::encoding::EncodingBuilder; } /// Payload primitives @@ -256,6 +257,7 @@ pub mod subscriber { /// Publishing primitives pub mod publication { pub use crate::api::builders::publication::PublisherBuilder; + #[zenoh_macros::unstable] pub use crate::api::publication::MatchingListener; pub use crate::api::publication::Priority; pub use crate::api::publication::Publisher; @@ -349,6 +351,7 @@ pub mod internal { pub use zenoh_util::core::ResolveFuture; pub use zenoh_util::LibLoader; pub use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer, ZENOH_HOME_ENV_VAR}; + pub use zenoh_task::TerminatableTask; } #[cfg(feature = "shared-memory")] diff --git a/zenoh/src/net/routing/hat/linkstate_peer/network.rs b/zenoh/src/net/routing/hat/linkstate_peer/network.rs index f1c376df20..541594f0ca 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/network.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/network.rs @@ -15,7 +15,7 @@ use crate::net::codec::Zenoh080Routing; use crate::net::protocol::linkstate::{LinkState, LinkStateList}; use crate::net::routing::dispatcher::tables::NodeId; use crate::net::runtime::Runtime; -use crate::runtime::WeakRuntime; +use crate::net::runtime::WeakRuntime; use petgraph::graph::NodeIndex; use petgraph::visit::{VisitMap, Visitable}; use std::convert::TryInto; diff --git a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs index 9a19dac6f6..88a86f51f4 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs @@ -14,7 +14,7 @@ use crate::net::codec::Zenoh080Routing; use crate::net::protocol::linkstate::{LinkState, LinkStateList}; use crate::net::runtime::Runtime; -use crate::runtime::WeakRuntime; +use crate::net::runtime::WeakRuntime; use petgraph::graph::NodeIndex; use std::convert::TryInto; use vec_map::VecMap; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 0a6e48d228..06d61dbd5a 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -293,11 +293,11 @@ async fn open_session_unicast_runtime(endpoints: &[&str]) -> (Runtime, Runtime) async fn zenoh_2sessions_1runtime_init() { let (r1, r2) = open_session_unicast_runtime(&["tcp/127.0.0.1:17449"]).await; println!("[RI][02a] Creating peer01 session from runtime 1"); - let peer01 = zenoh::init(r1.clone()).res_async().await.unwrap(); + let peer01 = zenoh::session::init(r1.clone()).res_async().await.unwrap(); println!("[RI][02b] Creating peer02 session from runtime 2"); - let peer02 = zenoh::init(r2.clone()).res_async().await.unwrap(); + let peer02 = zenoh::session::init(r2.clone()).res_async().await.unwrap(); println!("[RI][02c] Creating peer01a session from runtime 1"); - let peer01a = zenoh::init(r1.clone()).res_async().await.unwrap(); + let peer01a = 
zenoh::session::init(r1.clone()).res_async().await.unwrap(); println!("[RI][03c] Closing peer01a session"); std::mem::drop(peer01a); test_session_pubsub(&peer01, &peer02, Reliability::Reliable).await; From a1b50dd4ae6edb1a345acece51d5aec075f750e2 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 11 Apr 2024 13:40:23 +0200 Subject: [PATCH 202/357] Protocol attachment to payload (#923) * Attachment typedef * Fix io::Write for ZBuf and Payload * FIx doc * Add payload serializer test * OptionPayload for API ergonomicity --- commons/zenoh-buffers/src/zbuf.rs | 8 +- examples/examples/z_pub.rs | 27 +--- examples/examples/z_sub.rs | 9 +- zenoh/src/payload.rs | 106 ++++++++++++--- zenoh/src/publication.rs | 4 +- zenoh/src/query.rs | 4 +- zenoh/src/queryable.rs | 4 +- zenoh/src/sample/builder.rs | 8 +- zenoh/src/sample/mod.rs | 216 ++---------------------------- zenoh/tests/attachments.rs | 72 +++++----- 10 files changed, 167 insertions(+), 291 deletions(-) diff --git a/commons/zenoh-buffers/src/zbuf.rs b/commons/zenoh-buffers/src/zbuf.rs index 4a655ce36a..616dbb1b96 100644 --- a/commons/zenoh-buffers/src/zbuf.rs +++ b/commons/zenoh-buffers/src/zbuf.rs @@ -720,12 +720,12 @@ impl BacktrackableWriter for ZBufWriter<'_> { #[cfg(feature = "std")] impl<'a> io::Write for ZBufWriter<'a> { fn write(&mut self, buf: &[u8]) -> io::Result { + if buf.is_empty() { + return Ok(0); + } match ::write(self, buf) { Ok(n) => Ok(n.get()), - Err(_) => Err(io::Error::new( - io::ErrorKind::UnexpectedEof, - "UnexpectedEof", - )), + Err(_) => Err(io::ErrorKind::UnexpectedEof.into()), } } diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 8cd3c4edba..68fbf02ca2 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -35,16 +35,12 @@ async fn main() { tokio::time::sleep(Duration::from_secs(1)).await; let buf = format!("[{idx:4}] {value}"); println!("Putting Data ('{}': '{}')...", &key_expr, buf); - let mut put = publisher.put(buf); - if let Some(attachment) = &attachment { - put = put.attachment(Some( - attachment - .split('&') - .map(|pair| split_once(pair, '=')) - .collect(), - )) - } - put.res().await.unwrap(); + publisher + .put(buf) + .attachment(&attachment) + .res() + .await + .unwrap(); } } @@ -65,17 +61,6 @@ struct Args { common: CommonArgs, } -fn split_once(s: &str, c: char) -> (&[u8], &[u8]) { - let s_bytes = s.as_bytes(); - match s.find(c) { - Some(index) => { - let (l, r) = s_bytes.split_at(index); - (l, &r[1..]) - } - None => (s_bytes, &[]), - } -} - fn parse_args() -> (Config, KeyExpr<'static>, String, Option) { let args = Args::parse(); (args.common.into(), args.key, args.value, args.attach) diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index 299f0c8f49..1e19bbff0e 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -40,12 +40,19 @@ async fn main() { .payload() .deserialize::() .unwrap_or_else(|e| format!("{}", e)); - println!( + print!( ">> [Subscriber] Received {} ('{}': '{}')", sample.kind(), sample.key_expr().as_str(), payload ); + if let Some(att) = sample.attachment() { + let att = att + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)); + print!(" ({})", att); + } + println!(); } } diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index eac4f58e7c..11a6f0c360 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -91,6 +91,11 @@ impl Payload { Ok(Payload::new(buf)) } + /// Get a [`PayloadWriter`] implementing [`std::io::Write`] trait. 
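The io::Write fix above also surfaces one level up through Payload's writer accessor; a minimal round-trip sketch, assuming Payload::empty(), the writer() shown here, and String deserialization through ZSerde:

    use std::io::Write;
    use zenoh::payload::Payload;

    fn main() {
        let mut payload = Payload::empty();
        // PayloadWriter forwards to the (now fixed) ZBufWriter io::Write impl.
        write!(payload.writer(), "hello {}", "zenoh").unwrap();
        assert_eq!(payload.deserialize::<String>().unwrap(), "hello zenoh");
    }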
+ pub fn writer(&mut self) -> PayloadWriter<'_> { + PayloadWriter(self.0.writer()) + } + /// Get a [`PayloadReader`] implementing [`std::io::Read`] trait. pub fn iter(&self) -> PayloadIterator<'_, T> where @@ -104,12 +109,7 @@ impl Payload { } } - /// Get a [`PayloadWriter`] implementing [`std::io::Write`] trait. - pub fn writer(&mut self) -> PayloadWriter<'_> { - PayloadWriter(self.0.writer()) - } - - /// Encode an object of type `T` as a [`Value`] using the [`ZSerde`]. + /// Serialize an object of type `T` as a [`Value`] using the [`ZSerde`]. /// /// ```rust /// use zenoh::payload::Payload; @@ -126,7 +126,7 @@ impl Payload { ZSerde.serialize(t) } - /// Decode an object of type `T` from a [`Value`] using the [`ZSerde`]. + /// Deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. pub fn deserialize<'a, T>(&'a self) -> ZResult where ZSerde: Deserialize<'a, T>, @@ -137,7 +137,7 @@ impl Payload { .map_err(|e| zerror!("{:?}", e).into()) } - /// Decode an object of type `T` from a [`Value`] using the [`ZSerde`]. + /// Infallibly deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. pub fn into<'a, T>(&'a self) -> T where ZSerde: Deserialize<'a, T, Error = Infallible>, @@ -231,6 +231,50 @@ where } } +/// Wrapper type for API ergonomicity to allow any type `T` to be converted into `Option` where `T` implements `Into`. +#[repr(transparent)] +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct OptionPayload(Option); + +impl From for OptionPayload +where + T: Into, +{ + fn from(value: T) -> Self { + Self(Some(value.into())) + } +} + +impl From> for OptionPayload +where + T: Into, +{ + fn from(mut value: Option) -> Self { + match value.take() { + Some(v) => Self(Some(v.into())), + None => Self(None), + } + } +} + +impl From<&Option> for OptionPayload +where + for<'a> &'a T: Into, +{ + fn from(value: &Option) -> Self { + match value.as_ref() { + Some(v) => Self(Some(v.into())), + None => Self(None), + } + } +} + +impl From for Option { + fn from(value: OptionPayload) -> Self { + value.0 + } +} + /// The default serializer for Zenoh payload. It supports primitives types, such as: vec, int, uint, float, string, bool. /// It also supports common Rust serde values. #[derive(Clone, Copy, Debug)] @@ -858,7 +902,7 @@ impl Serialize<&serde_yaml::Value> for ZSerde { fn serialize(self, t: &serde_yaml::Value) -> Self::Output { let mut payload = Payload::empty(); - serde_yaml::to_writer(payload.0.writer(), t)?; + serde_yaml::to_writer(payload.writer(), t)?; Ok(payload) } } @@ -1092,15 +1136,9 @@ impl TryFrom for SharedMemoryBuf { } // Tuple -impl Serialize<(A, B)> for ZSerde -where - A: Into, - B: Into, -{ - type Output = Payload; - - fn serialize(self, t: (A, B)) -> Self::Output { - let (a, b) = t; +macro_rules! 
impl_tuple { + ($t:expr) => {{ + let (a, b) = $t; let codec = Zenoh080::new(); let mut buffer: ZBuf = ZBuf::empty(); @@ -1117,6 +1155,29 @@ where } Payload::new(buffer) + }}; +} +impl Serialize<(A, B)> for ZSerde +where + A: Into, + B: Into, +{ + type Output = Payload; + + fn serialize(self, t: (A, B)) -> Self::Output { + impl_tuple!(t) + } +} + +impl Serialize<&(A, B)> for ZSerde +where + for<'a> &'a A: Into, + for<'b> &'b B: Into, +{ + type Output = Payload; + + fn serialize(self, t: &(A, B)) -> Self::Output { + impl_tuple!(t) } } @@ -1402,5 +1463,14 @@ mod tests { println!("Deserialize:\t{:?}\n", p); let o = HashMap::from_iter(p.iter::<(usize, Vec)>()); assert_eq!(hm, o); + + let mut hm: HashMap = HashMap::new(); + hm.insert(String::from("0"), String::from("a")); + hm.insert(String::from("1"), String::from("b")); + println!("Serialize:\t{:?}", hm); + let p = Payload::from_iter(hm.iter()); + println!("Deserialize:\t{:?}\n", p); + let o = HashMap::from_iter(p.iter::<(String, String)>()); + assert_eq!(hm, o); } } diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 4f31c73a24..cdd9e810a6 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -14,6 +14,7 @@ //! Publishing primitives. use crate::net::primitives::Primitives; +use crate::payload::OptionPayload; use crate::prelude::*; #[zenoh_macros::unstable] use crate::sample::Attachment; @@ -167,7 +168,8 @@ impl SampleBuilderTrait for PublicationBuilder { } } #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: TA) -> Self { + fn attachment>(self, attachment: TA) -> Self { + let attachment: OptionPayload = attachment.into(); Self { attachment: attachment.into(), ..self diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 3a380bd1c9..96b2ccec38 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -14,6 +14,7 @@ //! Query primitives. 
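The net effect on the API is that attachment() now takes impl Into<OptionPayload>, so both plain values and optional ones are accepted. A minimal publisher sketch, assuming zenoh is built with the unstable feature (attachments are unstable), a tokio runtime as in the examples, and an illustrative key expression:

    use zenoh::{config::Config, prelude::r#async::*};

    #[tokio::main]
    async fn main() {
        let session = zenoh::open(Config::default()).res().await.unwrap();
        let publisher = session
            .declare_publisher("demo/attachment")
            .res()
            .await
            .unwrap();

        // An optional attachment can now be passed by reference,
        // exactly as in the updated z_pub example above.
        let attachment: Option<String> = Some(String::from("source=sketch"));
        publisher
            .put(String::from("payload"))
            .attachment(&attachment) // &Option<String> -> OptionPayload
            .res()
            .await
            .unwrap();

        // ...and None simply means "no attachment".
        publisher
            .put(String::from("payload"))
            .attachment(None::<String>)
            .res()
            .await
            .unwrap();
    }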
use crate::handlers::{locked, Callback, DefaultHandler}; +use crate::payload::OptionPayload; use crate::prelude::*; #[zenoh_macros::unstable] use crate::sample::Attachment; @@ -144,7 +145,8 @@ impl SampleBuilderTrait for GetBuilder<'_, '_, Handler> { } #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: T) -> Self { + fn attachment>(self, attachment: T) -> Self { + let attachment: OptionPayload = attachment.into(); Self { attachment: attachment.into(), ..self diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 0ad3a36c07..8d057c592b 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -17,6 +17,7 @@ use crate::encoding::Encoding; use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; +use crate::payload::OptionPayload; use crate::prelude::*; use crate::sample::builder::SampleBuilder; use crate::sample::QoSBuilder; @@ -308,7 +309,8 @@ impl TimestampBuilderTrait for ReplyBuilder<'_, '_, T> { #[cfg(feature = "unstable")] impl SampleBuilderTrait for ReplyBuilder<'_, '_, T> { #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: U) -> Self { + fn attachment>(self, attachment: U) -> Self { + let attachment: OptionPayload = attachment.into(); Self { attachment: attachment.into(), ..self diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index bad35024ef..79acde33a3 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -14,8 +14,9 @@ use std::marker::PhantomData; +use crate::payload::OptionPayload; #[cfg(feature = "unstable")] -use crate::sample::{Attachment, SourceInfo}; +use crate::sample::SourceInfo; use crate::sample::{QoS, QoSBuilder}; use crate::Encoding; use crate::KeyExpr; @@ -51,7 +52,7 @@ pub trait SampleBuilderTrait { fn source_info(self, source_info: SourceInfo) -> Self; /// Attach user-provided data in key-value format #[zenoh_macros::unstable] - fn attachment>>(self, attachment: T) -> Self; + fn attachment>(self, attachment: T) -> Self; } pub trait ValueBuilderTrait { @@ -177,7 +178,8 @@ impl SampleBuilderTrait for SampleBuilder { } #[zenoh_macros::unstable] - fn attachment>>(self, attachment: U) -> Self { + fn attachment>(self, attachment: U) -> Self { + let attachment: OptionPayload = attachment.into(); Self { sample: Sample { attachment: attachment.into(), diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/sample/mod.rs index 0ef8462d2a..2429f138ee 100644 --- a/zenoh/src/sample/mod.rs +++ b/zenoh/src/sample/mod.rs @@ -212,226 +212,26 @@ impl From> for SourceInfo { } mod attachment { - #[zenoh_macros::unstable] - use zenoh_buffers::{ - reader::{HasReader, Reader}, - writer::HasWriter, - ZBuf, ZBufReader, ZSlice, - }; - #[zenoh_macros::unstable] - use zenoh_codec::{RCodec, WCodec, Zenoh080}; + use crate::Payload; #[zenoh_macros::unstable] use zenoh_protocol::zenoh::ext::AttachmentType; - /// A builder for [`Attachment`] - #[zenoh_macros::unstable] - #[derive(Debug)] - pub struct AttachmentBuilder { - pub(crate) inner: Vec, - } #[zenoh_macros::unstable] - impl Default for AttachmentBuilder { - fn default() -> Self { - Self::new() - } - } - #[zenoh_macros::unstable] - impl AttachmentBuilder { - pub fn new() -> Self { - Self { inner: Vec::new() } - } - fn _insert(&mut self, key: &[u8], value: &[u8]) { - let codec = Zenoh080; - let mut writer = self.inner.writer(); - codec.write(&mut writer, key).unwrap(); // Infallible, barring alloc failure - codec.write(&mut writer, value).unwrap(); // Infallible, barring alloc failure - } - /// Inserts a key-value pair to 
the attachment. - /// - /// Note that [`Attachment`] is a list of non-unique key-value pairs: inserting at the same key multiple times leads to both values being transmitted for that key. - pub fn insert + ?Sized, Value: AsRef<[u8]> + ?Sized>( - &mut self, - key: &Key, - value: &Value, - ) { - self._insert(key.as_ref(), value.as_ref()) - } - pub fn build(self) -> Attachment { - Attachment { - inner: self.inner.into(), - } - } - } - #[zenoh_macros::unstable] - impl From for Attachment { - fn from(value: AttachmentBuilder) -> Self { - Attachment { - inner: value.inner.into(), - } - } - } - #[zenoh_macros::unstable] - impl From for Option { - fn from(value: AttachmentBuilder) -> Self { - if value.inner.is_empty() { - None - } else { - Some(value.into()) - } - } - } + pub type Attachment = Payload; - #[zenoh_macros::unstable] - #[derive(Clone)] - pub struct Attachment { - pub(crate) inner: ZBuf, - } - #[zenoh_macros::unstable] - impl Default for Attachment { - fn default() -> Self { - Self::new() - } - } #[zenoh_macros::unstable] impl From for AttachmentType { fn from(this: Attachment) -> Self { - AttachmentType { buffer: this.inner } + AttachmentType { + buffer: this.into(), + } } } + #[zenoh_macros::unstable] impl From> for Attachment { fn from(this: AttachmentType) -> Self { - Attachment { inner: this.buffer } - } - } - #[zenoh_macros::unstable] - impl Attachment { - pub fn new() -> Self { - Self { - inner: ZBuf::empty(), - } - } - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - pub fn len(&self) -> usize { - self.iter().count() - } - pub fn iter(&self) -> AttachmentIterator { - self.into_iter() - } - fn _get(&self, key: &[u8]) -> Option { - self.iter() - .find_map(|(k, v)| (k.as_slice() == key).then_some(v)) - } - pub fn get>(&self, key: &Key) -> Option { - self._get(key.as_ref()) - } - fn _insert(&mut self, key: &[u8], value: &[u8]) { - let codec = Zenoh080; - let mut writer = self.inner.writer(); - codec.write(&mut writer, key).unwrap(); // Infallible, barring alloc failure - codec.write(&mut writer, value).unwrap(); // Infallible, barring alloc failure - } - /// Inserts a key-value pair to the attachment. - /// - /// Note that [`Attachment`] is a list of non-unique key-value pairs: inserting at the same key multiple times leads to both values being transmitted for that key. 
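With Attachment reduced to an alias of Payload, the removed builder and iterator map onto FromIterator and typed iteration. A minimal sketch, assuming the unstable feature and that String pairs round-trip through ZSerde as in the payload tests above:

    use zenoh::sample::Attachment; // now a typedef of Payload

    fn main() {
        // What used to be AttachmentBuilder::insert() is now FromIterator...
        let pairs = [
            (String::from("hello"), String::from("there")),
            (String::from("foo"), String::from("bar")),
        ];
        let attachment = Attachment::from_iter(pairs.iter());

        // ...and the old (ZSlice, ZSlice) iterator becomes a typed one.
        for (k, v) in attachment.iter::<(String, String)>() {
            println!("{k} = {v}");
        }
    }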
- /// - /// [`Attachment`] is not very efficient at inserting, so if you wish to perform multiple inserts, it's generally better to [`Attachment::extend`] after performing the inserts on an [`AttachmentBuilder`] - pub fn insert + ?Sized, Value: AsRef<[u8]> + ?Sized>( - &mut self, - key: &Key, - value: &Value, - ) { - self._insert(key.as_ref(), value.as_ref()) - } - fn _extend(&mut self, with: Self) -> &mut Self { - for slice in with.inner.zslices().cloned() { - self.inner.push_zslice(slice); - } - self - } - pub fn extend(&mut self, with: impl Into) -> &mut Self { - let with = with.into(); - self._extend(with) - } - } - #[zenoh_macros::unstable] - pub struct AttachmentIterator<'a> { - reader: ZBufReader<'a>, - } - #[zenoh_macros::unstable] - impl<'a> core::iter::IntoIterator for &'a Attachment { - type Item = (ZSlice, ZSlice); - type IntoIter = AttachmentIterator<'a>; - fn into_iter(self) -> Self::IntoIter { - AttachmentIterator { - reader: self.inner.reader(), - } - } - } - #[zenoh_macros::unstable] - impl core::fmt::Debug for Attachment { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{{")?; - for (key, value) in self { - let key = key.as_slice(); - let value = value.as_slice(); - match core::str::from_utf8(key) { - Ok(key) => write!(f, "\"{key}\": ")?, - Err(_) => { - write!(f, "0x")?; - for byte in key { - write!(f, "{byte:02X}")? - } - } - } - match core::str::from_utf8(value) { - Ok(value) => write!(f, "\"{value}\", ")?, - Err(_) => { - write!(f, "0x")?; - for byte in value { - write!(f, "{byte:02X}")? - } - write!(f, ", ")? - } - } - } - write!(f, "}}") - } - } - #[zenoh_macros::unstable] - impl<'a> core::iter::Iterator for AttachmentIterator<'a> { - type Item = (ZSlice, ZSlice); - fn next(&mut self) -> Option { - let key = Zenoh080.read(&mut self.reader).ok()?; - let value = Zenoh080.read(&mut self.reader).ok()?; - Some((key, value)) - } - fn size_hint(&self) -> (usize, Option) { - ( - (self.reader.remaining() != 0) as usize, - Some(self.reader.remaining() / 2), - ) - } - } - #[zenoh_macros::unstable] - impl<'a> core::iter::FromIterator<(&'a [u8], &'a [u8])> for AttachmentBuilder { - fn from_iter>(iter: T) -> Self { - let codec = Zenoh080; - let mut buffer: Vec = Vec::new(); - let mut writer = buffer.writer(); - for (key, value) in iter { - codec.write(&mut writer, key).unwrap(); // Infallible, barring allocation failures - codec.write(&mut writer, value).unwrap(); // Infallible, barring allocation failures - } - Self { inner: buffer } - } - } - #[zenoh_macros::unstable] - impl<'a> core::iter::FromIterator<(&'a [u8], &'a [u8])> for Attachment { - fn from_iter>(iter: T) -> Self { - AttachmentBuilder::from_iter(iter).into() + this.buffer.into() } } } @@ -468,7 +268,7 @@ impl TryFrom for SampleKind { } #[zenoh_macros::unstable] -pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; +pub use attachment::Attachment; /// Structure with public fields for sample. It's convenient if it's necessary to decompose a sample into its fields. 
pub struct SampleFields { diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 9fb99b7cc0..2a58749701 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -13,20 +13,25 @@ // #[cfg(feature = "unstable")] #[test] -fn pubsub() { +fn attachment_pubsub() { use zenoh::prelude::sync::*; + use zenoh::sample::Attachment; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh .declare_subscriber("test/attachment") .callback(|sample| { println!("{}", sample.payload().deserialize::().unwrap()); - for (k, v) in sample.attachment().unwrap() { + for (k, v) in sample.attachment().unwrap().iter::<( + [u8; std::mem::size_of::()], + [u8; std::mem::size_of::()], + )>() { assert!(k.iter().rev().zip(v.as_slice()).all(|(k, v)| k == v)) } }) .res() .unwrap(); + let publisher = zenoh.declare_publisher("test/attachment").res().unwrap(); for i in 0..10 { let mut backer = [( @@ -36,55 +41,57 @@ fn pubsub() { for (j, backer) in backer.iter_mut().enumerate() { *backer = ((i * 10 + j).to_le_bytes(), (i * 10 + j).to_be_bytes()) } + zenoh .put("test/attachment", "put") - .attachment(Some( - backer - .iter() - .map(|b| (b.0.as_slice(), b.1.as_slice())) - .collect(), - )) + .attachment(Attachment::from_iter(backer.iter())) .res() .unwrap(); publisher .put("publisher") - .attachment(Some( - backer - .iter() - .map(|b| (b.0.as_slice(), b.1.as_slice())) - .collect(), - )) + .attachment(Attachment::from_iter(backer.iter())) .res() .unwrap(); } } + #[cfg(feature = "unstable")] #[test] -fn queries() { +fn attachment_queries() { use zenoh::{prelude::sync::*, sample::builder::SampleBuilderTrait, sample::Attachment}; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh .declare_queryable("test/attachment") .callback(|query| { - println!( - "{}", - query - .value() - .map(|q| q.payload.deserialize::().unwrap()) - .unwrap_or_default() - ); - let mut attachment = Attachment::new(); - for (k, v) in query.attachment().unwrap() { + let s = query + .value() + .map(|q| q.payload.deserialize::().unwrap()) + .unwrap_or_default(); + println!("Query value: {}", s); + + let attachment = query.attachment().unwrap(); + println!("Query attachment: {:?}", attachment); + for (k, v) in attachment.iter::<( + [u8; std::mem::size_of::()], + [u8; std::mem::size_of::()], + )>() { assert!(k.iter().rev().zip(v.as_slice()).all(|(k, v)| k == v)); - attachment.insert(&k, &k); } + query .reply( query.key_expr().clone(), query.value().unwrap().payload.clone(), ) - .attachment(attachment) + .attachment(Attachment::from_iter( + attachment + .iter::<( + [u8; std::mem::size_of::()], + [u8; std::mem::size_of::()], + )>() + .map(|(k, _)| (k, k)), + )) .res() .unwrap(); }) @@ -98,20 +105,19 @@ fn queries() { for (j, backer) in backer.iter_mut().enumerate() { *backer = ((i * 10 + j).to_le_bytes(), (i * 10 + j).to_be_bytes()) } + let get = zenoh .get("test/attachment") .payload("query") - .attachment(Some( - backer - .iter() - .map(|b| (b.0.as_slice(), b.1.as_slice())) - .collect(), - )) + .attachment(Attachment::from_iter(backer.iter())) .res() .unwrap(); while let Ok(reply) = get.recv() { let response = reply.sample.as_ref().unwrap(); - for (k, v) in response.attachment().unwrap() { + for (k, v) in response.attachment().unwrap().iter::<( + [u8; std::mem::size_of::()], + [u8; std::mem::size_of::()], + )>() { assert_eq!(k, v) } } From be6d3b0165bacc68238079f9f318d5894ca97c45 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 11 Apr 2024 14:11:02 +0200 Subject: [PATCH 203/357] 
Fix ci/valgrind --- .../src/pub_sub/bin/z_pub_sub.rs | 9 +++-- .../src/queryable_get/bin/z_queryable_get.rs | 33 ++++++++++++------- zenoh/src/publication.rs | 5 ++- zenoh/src/query.rs | 5 ++- zenoh/src/queryable.rs | 9 ++--- zenoh/src/sample/builder.rs | 9 ++--- zenoh/src/sample/mod.rs | 7 ++-- 7 files changed, 44 insertions(+), 33 deletions(-) diff --git a/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs b/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs index fac3437f39..454be2a869 100644 --- a/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs +++ b/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs @@ -38,9 +38,12 @@ async fn main() { .callback(|sample| { println!( ">> [Subscriber] Received {} ('{}': '{}')", - sample.kind, - sample.key_expr.as_str(), - sample.value + sample.kind(), + sample.key_expr().as_str(), + sample + .payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)) ); }) .res() diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs index 102b6a036c..ea0f16399c 100644 --- a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -30,13 +30,16 @@ async fn main() { .declare_queryable(&queryable_key_expr.clone()) .callback(move |query| { println!(">> Handling query '{}'", query.selector()); - let reply = Ok(Sample::new( - queryable_key_expr.clone(), - query.value().unwrap().clone(), - )); - zenoh_runtime::ZRuntime::Application.block_in_place( - async move { query.reply(reply).res().await.unwrap(); } - ); + zenoh_runtime::ZRuntime::Application.block_in_place(async move { + query + .reply( + query.selector().key_expr, + query.value().unwrap().payload.clone(), + ) + .res() + .await + .unwrap(); + }); }) .complete(true) .res() @@ -51,7 +54,7 @@ async fn main() { println!("Sending Query '{get_selector}'..."); let replies = get_session .get(&get_selector) - .with_value(idx) + .value(idx) .target(QueryTarget::All) .res() .await @@ -60,10 +63,18 @@ async fn main() { match reply.sample { Ok(sample) => println!( ">> Received ('{}': '{}')", - sample.key_expr.as_str(), - sample.value, + sample.key_expr().as_str(), + sample + .payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)) + ), + Err(err) => println!( + ">> Received (ERROR: '{}')", + err.payload + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)) ), - Err(err) => println!(">> Received (ERROR: '{}')", String::try_from(&err).unwrap()), } } } diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index cdd9e810a6..f36e253636 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -14,16 +14,15 @@ //! Publishing primitives. use crate::net::primitives::Primitives; -use crate::payload::OptionPayload; use crate::prelude::*; -#[zenoh_macros::unstable] -use crate::sample::Attachment; use crate::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] use crate::{ handlers::{Callback, DefaultHandler, IntoHandler}, + payload::OptionPayload, + sample::Attachment, Id, }; use std::future::Ready; diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 96b2ccec38..7b8da9f768 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -14,12 +14,11 @@ //! Query primitives. 
use crate::handlers::{locked, Callback, DefaultHandler}; -use crate::payload::OptionPayload; use crate::prelude::*; -#[zenoh_macros::unstable] -use crate::sample::Attachment; use crate::sample::QoSBuilder; use crate::Session; +#[cfg(feature = "unstable")] +use crate::{payload::OptionPayload, sample::Attachment}; use std::collections::HashMap; use std::future::Ready; use std::time::Duration; diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 8d057c592b..a6d87df5a4 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -17,17 +17,18 @@ use crate::encoding::Encoding; use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; -use crate::payload::OptionPayload; use crate::prelude::*; use crate::sample::builder::SampleBuilder; use crate::sample::QoSBuilder; -#[cfg(feature = "unstable")] -use crate::sample::SourceInfo; use crate::Id; use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] -use crate::{query::ReplyKeyExpr, sample::Attachment}; +use crate::{ + payload::OptionPayload, + query::ReplyKeyExpr, + sample::{Attachment, SourceInfo}, +}; use std::fmt; use std::future::Ready; use std::ops::Deref; diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index 79acde33a3..6dc85c4046 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -11,12 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // - -use std::marker::PhantomData; - -use crate::payload::OptionPayload; -#[cfg(feature = "unstable")] -use crate::sample::SourceInfo; use crate::sample::{QoS, QoSBuilder}; use crate::Encoding; use crate::KeyExpr; @@ -25,6 +19,9 @@ use crate::Priority; use crate::Sample; use crate::SampleKind; use crate::Value; +#[cfg(feature = "unstable")] +use crate::{payload::OptionPayload, sample::SourceInfo}; +use std::marker::PhantomData; use uhlc::Timestamp; use zenoh_core::zresult; use zenoh_protocol::core::CongestionControl; diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/sample/mod.rs index 2429f138ee..f4fb1e074a 100644 --- a/zenoh/src/sample/mod.rs +++ b/zenoh/src/sample/mod.rs @@ -19,7 +19,7 @@ use crate::prelude::{KeyExpr, Value}; use crate::sample::builder::{QoSBuilderTrait, ValueBuilderTrait}; use crate::time::Timestamp; use crate::Priority; -#[zenoh_macros::unstable] +#[cfg(feature = "unstable")] use serde::Serialize; use std::{convert::TryFrom, fmt}; use zenoh_protocol::core::CongestionControl; @@ -212,8 +212,9 @@ impl From> for SourceInfo { } mod attachment { - use crate::Payload; - #[zenoh_macros::unstable] + #[cfg(feature = "unstable")] + use crate::payload::Payload; + #[cfg(feature = "unstable")] use zenoh_protocol::zenoh::ext::AttachmentType; #[zenoh_macros::unstable] From d86653e4a59b7c28ea016ceabfa734c8b81596cc Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 11 Apr 2024 16:09:01 +0200 Subject: [PATCH 204/357] RingChannel sync/async/blocking/non-blocking (#903) * RingChannel sync/async/blocking/non-blocking * Add comment in the examples --- Cargo.lock | 50 ++++----- examples/examples/z_get.rs | 4 + examples/examples/z_pull.rs | 56 +++++++--- examples/examples/z_queryable.rs | 4 + zenoh/src/handlers.rs | 186 ------------------------------- zenoh/src/handlers/callback.rs | 90 +++++++++++++++ zenoh/src/handlers/fifo.rs | 61 ++++++++++ zenoh/src/handlers/mod.rs | 52 +++++++++ zenoh/src/handlers/ring.rs | 116 +++++++++++++++++++ zenoh/src/lib.rs | 2 +- zenoh/src/liveliness.rs | 4 +- zenoh/src/publication.rs | 4 +- zenoh/src/session.rs | 10 +- zenoh/tests/handler.rs | 11 +- 14 
files changed, 407 insertions(+), 243 deletions(-) delete mode 100644 zenoh/src/handlers.rs create mode 100644 zenoh/src/handlers/callback.rs create mode 100644 zenoh/src/handlers/fifo.rs create mode 100644 zenoh/src/handlers/mod.rs create mode 100644 zenoh/src/handlers/ring.rs diff --git a/Cargo.lock b/Cargo.lock index 09e598d878..66c6c4f2c2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -165,9 +165,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.13" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" +checksum = "96b09b5178381e0874812a9b157f7fe84982617e48f71f4e3235482775e5b540" dependencies = [ "anstyle", "anstyle-parse", @@ -1103,9 +1103,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.11.3" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b35839ba51819680ba087cd351788c9a3c476841207e0b8cee0b04722343b9" +checksum = "6c012a26a7f605efc424dd53697843a72be7dc86ad2d01f7814337794a12231d" dependencies = [ "anstream", "anstyle", @@ -1122,9 +1122,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "erased-serde" -version = "0.4.4" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b73807008a3c7f171cc40312f37d95ef0396e048b5848d775f54b1a4dd4a0d3" +checksum = "6c138974f9d5e7fe373eb04df7cae98833802ae4b11c24ac7039a21d5af4b26c" dependencies = [ "serde", ] @@ -1541,9 +1541,9 @@ dependencies = [ [[package]] name = "http" -version = "1.1.0" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" dependencies = [ "bytes", "fnv", @@ -1854,9 +1854,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.21" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" dependencies = [ "serde", "value-bag", @@ -2865,9 +2865,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.22.3" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99008d7ad0bbbea527ec27bddbc0e432c5b87d8175178cee68d2eec9c4a1813c" +checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" dependencies = [ "log", "ring 0.17.6", @@ -2923,9 +2923,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.4.1" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd36cc4259e3e4514335c4a138c6b43171a8d61d8f5c9348f9fc7529416f247" +checksum = "5ede67b28608b4c60685c7d54122d4400d90f62b40caee7700e700380a390fa8" [[package]] name = "rustls-webpki" @@ -3701,9 +3701,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.37.0" +version = "1.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" +checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" dependencies = [ "backtrace", "bytes", @@ -3743,7 +3743,7 @@ version = "0.25.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" dependencies = [ - "rustls 0.22.3", + "rustls 0.22.2", "rustls-pki-types", "tokio", ] @@ -4036,9 +4036,9 @@ dependencies = [ [[package]] name = "value-bag" -version = "1.8.1" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74797339c3b98616c009c7c3eb53a0ce41e85c8ec66bd3db96ed132d20cfdee8" +checksum = "d92ccd67fb88503048c01b59152a04effd0782d035a83a6d256ce6085f08f4a3" dependencies = [ "value-bag-serde1", "value-bag-sval2", @@ -4046,9 +4046,9 @@ dependencies = [ [[package]] name = "value-bag-serde1" -version = "1.8.1" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc35703541cbccb5278ef7b589d79439fc808ff0b5867195a3230f9a47421d39" +checksum = "b0b9f3feef403a50d4d67e9741a6d8fc688bcbb4e4f31bd4aab72cc690284394" dependencies = [ "erased-serde", "serde", @@ -4057,9 +4057,9 @@ dependencies = [ [[package]] name = "value-bag-sval2" -version = "1.8.1" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "285b43c29d0b4c0e65aad24561baee67a1b69dc9be9375d4a85138cbf556f7f8" +checksum = "30b24f4146b6f3361e91cbf527d1fb35e9376c3c0cef72ca5ec5af6d640fad7d" dependencies = [ "sval", "sval_buffer", @@ -4686,7 +4686,7 @@ dependencies = [ "flume", "futures", "log", - "rustls 0.22.3", + "rustls 0.22.2", "rustls-webpki 0.102.2", "serde", "tokio", @@ -4773,7 +4773,7 @@ dependencies = [ "base64 0.21.4", "futures", "log", - "rustls 0.22.3", + "rustls 0.22.2", "rustls-pemfile 2.0.0", "rustls-pki-types", "rustls-webpki 0.102.2", diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 8735ae8daa..486346a8ea 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -30,6 +30,10 @@ async fn main() { println!("Sending Query '{selector}'..."); let replies = session .get(&selector) + // // By default get receives replies from a FIFO. + // // Uncomment this line to use a ring channel instead. + // // More information on the ring channel are available in the z_pull example. + .with(zenoh::handlers::RingChannel::default()) .value(value) .target(target) .timeout(timeout) diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index 4e44930f4f..d6ae465555 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -13,7 +13,7 @@ // use clap::Parser; use std::time::Duration; -use zenoh::{config::Config, handlers::RingBuffer, prelude::r#async::*}; +use zenoh::{config::Config, handlers::RingChannel, prelude::r#async::*}; use zenoh_examples::CommonArgs; #[tokio::main] @@ -29,32 +29,27 @@ async fn main() { println!("Declaring Subscriber on '{key_expr}'..."); let subscriber = session .declare_subscriber(&key_expr) - .with(RingBuffer::new(size)) + .with(RingChannel::new(size)) .res() .await .unwrap(); - println!( - "Pulling data every {:#?} seconds. Press CTRL-C to quit...", - interval - ); + println!("Press CTRL-C to quit..."); + + // Blocking recv. If the ring is empty, wait for the first sample to arrive. loop { - match subscriber.recv() { - Ok(Some(sample)) => { + // Use .recv() for the synchronous version. + match subscriber.recv_async().await { + Ok(sample) => { let payload = sample .payload() .deserialize::() .unwrap_or_else(|e| format!("{}", e)); println!( - ">> [Subscriber] Pulled {} ('{}': '{}')", + ">> [Subscriber] Pulled {} ('{}': '{}')... 
performing a computation of {:#?}", sample.kind(), sample.key_expr().as_str(), payload, - ); - } - Ok(None) => { - println!( - ">> [Subscriber] Pulled nothing... sleep for {:#?}", interval ); tokio::time::sleep(interval).await; @@ -65,6 +60,35 @@ async fn main() { } } } + + // Non-blocking recv. This can be usually used to implement a polling mechanism. + // loop { + // match subscriber.try_recv() { + // Ok(Some(sample)) => { + // let payload = sample + // .payload() + // .deserialize::() + // .unwrap_or_else(|e| format!("{}", e)); + // println!( + // ">> [Subscriber] Pulled {} ('{}': '{}')", + // sample.kind(), + // sample.key_expr().as_str(), + // payload, + // ); + // } + // Ok(None) => { + // println!( + // ">> [Subscriber] Pulled nothing... sleep for {:#?}", + // interval + // ); + // tokio::time::sleep(interval).await; + // } + // Err(e) => { + // println!(">> [Subscriber] Pull error: {e}"); + // return; + // } + // } + // } } #[derive(clap::Parser, Clone, PartialEq, Debug)] @@ -73,10 +97,10 @@ struct SubArgs { /// The Key Expression to subscribe to. key: KeyExpr<'static>, /// The size of the ringbuffer. - #[arg(long, default_value = "3")] + #[arg(short, long, default_value = "3")] size: usize, /// The interval for pulling the ringbuffer. - #[arg(long, default_value = "5.0")] + #[arg(short, long, default_value = "5.0")] interval: f32, #[command(flatten)] common: CommonArgs, diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index 83ac63ce1f..5113f1c2b7 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -29,6 +29,10 @@ async fn main() { println!("Declaring Queryable on '{key_expr}'..."); let queryable = session .declare_queryable(&key_expr) + // // By default queryable receives queries from a FIFO. + // // Uncomment this line to use a ring channel instead. + // // More information on the ring channel are available in the z_pull example. + // .with(zenoh::handlers::RingChannel::default()) .complete(complete) .res() .await diff --git a/zenoh/src/handlers.rs b/zenoh/src/handlers.rs deleted file mode 100644 index c5d2c6bb90..0000000000 --- a/zenoh/src/handlers.rs +++ /dev/null @@ -1,186 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// - -//! Callback handler trait. -use crate::API_DATA_RECEPTION_CHANNEL_SIZE; - -use std::sync::{Arc, Mutex, Weak}; -use zenoh_collections::RingBuffer as RingBufferInner; -use zenoh_result::ZResult; - -/// An alias for `Arc`. -pub type Dyn = std::sync::Arc; - -/// An immutable callback function. -pub type Callback<'a, T> = Dyn; - -/// A type that can be converted into a [`Callback`]-handler pair. -/// -/// When Zenoh functions accept types that implement these, it intends to use the [`Callback`] as just that, -/// while granting you access to the handler through the returned value via [`std::ops::Deref`] and [`std::ops::DerefMut`]. -/// -/// Any closure that accepts `T` can be converted into a pair of itself and `()`. 
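The ring channel composes with queries as well, as hinted by the commented line in z_get above. A minimal sketch, assuming a reachable zenoh instance answering on the illustrative selector demo/**:

    use zenoh::{config::Config, handlers::RingChannel, prelude::r#async::*};

    #[tokio::main]
    async fn main() {
        let session = zenoh::open(Config::default()).res().await.unwrap();
        // Keep only the 8 most recent replies instead of back-pressuring a FIFO.
        let replies = session
            .get("demo/**")
            .with(RingChannel::new(8))
            .res()
            .await
            .unwrap();
        while let Ok(reply) = replies.recv_async().await {
            match reply.sample {
                Ok(sample) => println!(
                    ">> '{}': '{}'",
                    sample.key_expr().as_str(),
                    sample
                        .payload()
                        .deserialize::<String>()
                        .unwrap_or_else(|e| format!("{}", e))
                ),
                Err(err) => println!(
                    ">> ERROR: '{}'",
                    err.payload
                        .deserialize::<String>()
                        .unwrap_or_else(|e| format!("{}", e))
                ),
            }
        }
    }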
-pub trait IntoHandler<'a, T> { - type Handler; - - fn into_handler(self) -> (Callback<'a, T>, Self::Handler); -} - -impl<'a, T, F> IntoHandler<'a, T> for F -where - F: Fn(T) + Send + Sync + 'a, -{ - type Handler = (); - fn into_handler(self) -> (Callback<'a, T>, Self::Handler) { - (Dyn::from(self), ()) - } -} - -impl IntoHandler<'static, T> for (flume::Sender, flume::Receiver) { - type Handler = flume::Receiver; - - fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { - let (sender, receiver) = self; - ( - Dyn::new(move |t| { - if let Err(e) = sender.send(t) { - log::error!("{}", e) - } - }), - receiver, - ) - } -} - -/// The default handler in Zenoh is a FIFO queue. -pub struct DefaultHandler; - -impl IntoHandler<'static, T> for DefaultHandler { - type Handler = flume::Receiver; - - fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { - flume::bounded(*API_DATA_RECEPTION_CHANNEL_SIZE).into_handler() - } -} - -impl IntoHandler<'static, T> - for (std::sync::mpsc::SyncSender, std::sync::mpsc::Receiver) -{ - type Handler = std::sync::mpsc::Receiver; - - fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { - let (sender, receiver) = self; - ( - Dyn::new(move |t| { - if let Err(e) = sender.send(t) { - log::error!("{}", e) - } - }), - receiver, - ) - } -} - -/// Ring buffer with a limited queue size, which allows users to keep the last N data. -pub struct RingBuffer { - ring: Arc>>, -} - -impl RingBuffer { - /// Initialize the RingBuffer with the capacity size. - pub fn new(capacity: usize) -> Self { - RingBuffer { - ring: Arc::new(Mutex::new(RingBufferInner::new(capacity))), - } - } -} - -pub struct RingBufferHandler { - ring: Weak>>, -} - -impl RingBufferHandler { - pub fn recv(&self) -> ZResult> { - let Some(ring) = self.ring.upgrade() else { - bail!("The ringbuffer has been deleted."); - }; - let mut guard = ring.lock().map_err(|e| zerror!("{}", e))?; - Ok(guard.pull()) - } -} - -impl IntoHandler<'static, T> for RingBuffer { - type Handler = RingBufferHandler; - - fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { - let receiver = RingBufferHandler { - ring: Arc::downgrade(&self.ring), - }; - ( - Dyn::new(move |t| match self.ring.lock() { - Ok(mut g) => { - // Eventually drop the oldest element. - g.push_force(t); - } - Err(e) => log::error!("{}", e), - }), - receiver, - ) - } -} - -/// A function that can transform a [`FnMut`]`(T)` to -/// a [`Fn`]`(T)` with the help of a [`Mutex`](std::sync::Mutex). -pub fn locked(fnmut: impl FnMut(T)) -> impl Fn(T) { - let lock = std::sync::Mutex::new(fnmut); - move |x| zlock!(lock)(x) -} - -/// A handler containing 2 callback functions: -/// - `callback`: the typical callback function. `context` will be passed as its last argument. -/// - `drop`: a callback called when this handler is dropped. -/// -/// It is guaranteed that: -/// -/// - `callback` will never be called once `drop` has started. -/// - `drop` will only be called **once**, and **after every** `callback` has ended. -/// - The two previous guarantees imply that `call` and `drop` are never called concurrently. 
-pub struct CallbackDrop -where - DropFn: FnMut() + Send + Sync + 'static, -{ - pub callback: Callback, - pub drop: DropFn, -} - -impl Drop for CallbackDrop -where - DropFn: FnMut() + Send + Sync + 'static, -{ - fn drop(&mut self) { - (self.drop)() - } -} - -impl<'a, OnEvent, Event, DropFn> IntoHandler<'a, Event> for CallbackDrop -where - OnEvent: Fn(Event) + Send + Sync + 'a, - DropFn: FnMut() + Send + Sync + 'static, -{ - type Handler = (); - - fn into_handler(self) -> (Callback<'a, Event>, Self::Handler) { - (Dyn::from(move |evt| (self.callback)(evt)), ()) - } -} diff --git a/zenoh/src/handlers/callback.rs b/zenoh/src/handlers/callback.rs new file mode 100644 index 0000000000..21c1b0878c --- /dev/null +++ b/zenoh/src/handlers/callback.rs @@ -0,0 +1,90 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +//! Callback handler trait. +use super::{Dyn, IntoHandler}; + +/// A function that can transform a [`FnMut`]`(T)` to +/// a [`Fn`]`(T)` with the help of a [`Mutex`](std::sync::Mutex). +pub fn locked(fnmut: impl FnMut(T)) -> impl Fn(T) { + let lock = std::sync::Mutex::new(fnmut); + move |x| zlock!(lock)(x) +} + +/// An immutable callback function. +pub type Callback<'a, T> = Dyn; + +impl<'a, T, F> IntoHandler<'a, T> for F +where + F: Fn(T) + Send + Sync + 'a, +{ + type Handler = (); + fn into_handler(self) -> (Callback<'a, T>, Self::Handler) { + (Dyn::from(self), ()) + } +} + +impl IntoHandler<'static, T> for (flume::Sender, flume::Receiver) { + type Handler = flume::Receiver; + + fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { + let (sender, receiver) = self; + ( + Dyn::new(move |t| { + if let Err(e) = sender.send(t) { + log::error!("{}", e) + } + }), + receiver, + ) + } +} + +/// A handler containing 2 callback functions: +/// - `callback`: the typical callback function. `context` will be passed as its last argument. +/// - `drop`: a callback called when this handler is dropped. +/// +/// It is guaranteed that: +/// +/// - `callback` will never be called once `drop` has started. +/// - `drop` will only be called **once**, and **after every** `callback` has ended. +/// - The two previous guarantees imply that `call` and `drop` are never called concurrently. 
+pub struct CallbackDrop +where + DropFn: FnMut() + Send + Sync + 'static, +{ + pub callback: Callback, + pub drop: DropFn, +} + +impl Drop for CallbackDrop +where + DropFn: FnMut() + Send + Sync + 'static, +{ + fn drop(&mut self) { + (self.drop)() + } +} + +impl<'a, OnEvent, Event, DropFn> IntoHandler<'a, Event> for CallbackDrop +where + OnEvent: Fn(Event) + Send + Sync + 'a, + DropFn: FnMut() + Send + Sync + 'static, +{ + type Handler = (); + + fn into_handler(self) -> (Callback<'a, Event>, Self::Handler) { + (Dyn::from(move |evt| (self.callback)(evt)), ()) + } +} diff --git a/zenoh/src/handlers/fifo.rs b/zenoh/src/handlers/fifo.rs new file mode 100644 index 0000000000..0fa3ab304c --- /dev/null +++ b/zenoh/src/handlers/fifo.rs @@ -0,0 +1,61 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +//! Callback handler trait. +use super::{callback::Callback, Dyn, IntoHandler, API_DATA_RECEPTION_CHANNEL_SIZE}; + +/// The default handler in Zenoh is a FIFO queue. + +pub struct FifoChannel { + capacity: usize, +} + +impl FifoChannel { + /// Initialize the RingBuffer with the capacity size. + pub fn new(capacity: usize) -> Self { + Self { capacity } + } +} + +impl Default for FifoChannel { + fn default() -> Self { + Self::new(*API_DATA_RECEPTION_CHANNEL_SIZE) + } +} + +impl IntoHandler<'static, T> for FifoChannel { + type Handler = flume::Receiver; + + fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { + flume::bounded(self.capacity).into_handler() + } +} + +impl IntoHandler<'static, T> + for (std::sync::mpsc::SyncSender, std::sync::mpsc::Receiver) +{ + type Handler = std::sync::mpsc::Receiver; + + fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { + let (sender, receiver) = self; + ( + Dyn::new(move |t| { + if let Err(e) = sender.send(t) { + log::error!("{}", e) + } + }), + receiver, + ) + } +} diff --git a/zenoh/src/handlers/mod.rs b/zenoh/src/handlers/mod.rs new file mode 100644 index 0000000000..627c166795 --- /dev/null +++ b/zenoh/src/handlers/mod.rs @@ -0,0 +1,52 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +//! Callback handler trait. +mod callback; +mod fifo; +mod ring; + +pub use callback::*; +pub use fifo::*; +pub use ring::*; + +use crate::API_DATA_RECEPTION_CHANNEL_SIZE; + +/// An alias for `Arc`. +pub type Dyn = std::sync::Arc; + +/// A type that can be converted into a [`Callback`]-handler pair. +/// +/// When Zenoh functions accept types that implement these, it intends to use the [`Callback`] as just that, +/// while granting you access to the handler through the returned value via [`std::ops::Deref`] and [`std::ops::DerefMut`]. +/// +/// Any closure that accepts `T` can be converted into a pair of itself and `()`. 
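Beyond the built-in channels, CallbackDrop pairs a per-sample callback with a drop notification that honors the guarantees documented above, and FifoChannel::new makes the FIFO capacity explicit. A minimal subscriber sketch, assuming a tokio runtime and an illustrative key expression:

    use zenoh::{config::Config, handlers::CallbackDrop, prelude::r#async::*};

    #[tokio::main]
    async fn main() {
        let session = zenoh::open(Config::default()).res().await.unwrap();
        let _sub = session
            .declare_subscriber("demo/example")
            // `callback` runs for every sample; `drop` runs once, afterwards.
            .with(CallbackDrop {
                callback: |sample: Sample| println!("received '{}'", sample.key_expr()),
                drop: || println!("subscriber closed, no more callbacks"),
            })
            .res()
            .await
            .unwrap();

        // Alternatively, a bounded FIFO with an explicit capacity:
        // .with(zenoh::handlers::FifoChannel::new(256))

        tokio::time::sleep(std::time::Duration::from_secs(1)).await;
    }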
+pub trait IntoHandler<'a, T> { + type Handler; + + fn into_handler(self) -> (Callback<'a, T>, Self::Handler); +} + +/// The default handler in Zenoh is a FIFO queue. +#[repr(transparent)] +#[derive(Default)] +pub struct DefaultHandler(FifoChannel); + +impl IntoHandler<'static, T> for DefaultHandler { + type Handler = >::Handler; + + fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { + self.0.into_handler() + } +} diff --git a/zenoh/src/handlers/ring.rs b/zenoh/src/handlers/ring.rs new file mode 100644 index 0000000000..341a3efadd --- /dev/null +++ b/zenoh/src/handlers/ring.rs @@ -0,0 +1,116 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +//! Callback handler trait. +use crate::API_DATA_RECEPTION_CHANNEL_SIZE; + +use super::{callback::Callback, Dyn, IntoHandler}; +use std::sync::{Arc, Weak}; +use zenoh_collections::RingBuffer; +use zenoh_result::ZResult; + +/// A synchrounous ring channel with a limited size that allows users to keep the last N data. +pub struct RingChannel { + capacity: usize, +} + +impl RingChannel { + /// Initialize the RingBuffer with the capacity size. + pub fn new(capacity: usize) -> Self { + Self { capacity } + } +} + +impl Default for RingChannel { + fn default() -> Self { + Self::new(*API_DATA_RECEPTION_CHANNEL_SIZE) + } +} + +struct RingChannelInner { + ring: std::sync::Mutex>, + not_empty: flume::Receiver<()>, +} + +pub struct RingChannelHandler { + ring: Weak>, +} + +impl RingChannelHandler { + /// Receive from the ring channel. If the ring channel is empty, this call will block until an element is available in the channel. + pub fn recv(&self) -> ZResult { + let Some(channel) = self.ring.upgrade() else { + bail!("The ringbuffer has been deleted."); + }; + loop { + if let Some(t) = channel.ring.lock().map_err(|e| zerror!("{}", e))?.pull() { + return Ok(t); + } + channel.not_empty.recv().map_err(|e| zerror!("{}", e))?; + } + } + + /// Receive from the ring channel. If the ring channel is empty, this call will block until an element is available in the channel. + pub async fn recv_async(&self) -> ZResult { + let Some(channel) = self.ring.upgrade() else { + bail!("The ringbuffer has been deleted."); + }; + loop { + if let Some(t) = channel.ring.lock().map_err(|e| zerror!("{}", e))?.pull() { + return Ok(t); + } + channel + .not_empty + .recv_async() + .await + .map_err(|e| zerror!("{}", e))?; + } + } + + /// Try to receive from the ring channel. If the ring channel is empty, this call will return immediately without blocking. 
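+    ///
+    /// Illustrative sketch (not part of the original patch), assuming the handler
+    /// comes from a subscriber declared with a `RingChannel`:
+    ///
+    /// ```no_run
+    /// use zenoh::handlers::RingChannel;
+    /// use zenoh::prelude::sync::*;
+    ///
+    /// let session = zenoh::open(Config::default()).res().unwrap();
+    /// let subscriber = session
+    ///     .declare_subscriber("key/expression")
+    ///     .with(RingChannel::new(8))
+    ///     .res()
+    ///     .unwrap();
+    /// // `Ok(None)` means the ring is currently empty.
+    /// if let Ok(Some(sample)) = subscriber.try_recv() {
+    ///     println!("latest sample: {:?}", sample);
+    /// }
+    /// ```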
+ pub fn try_recv(&self) -> ZResult> { + let Some(channel) = self.ring.upgrade() else { + bail!("The ringbuffer has been deleted."); + }; + let mut guard = channel.ring.lock().map_err(|e| zerror!("{}", e))?; + Ok(guard.pull()) + } +} + +impl IntoHandler<'static, T> for RingChannel { + type Handler = RingChannelHandler; + + fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { + let (sender, receiver) = flume::bounded(1); + let inner = Arc::new(RingChannelInner { + ring: std::sync::Mutex::new(RingBuffer::new(self.capacity)), + not_empty: receiver, + }); + let receiver = RingChannelHandler { + ring: Arc::downgrade(&inner), + }; + ( + Dyn::new(move |t| match inner.ring.lock() { + Ok(mut g) => { + // Eventually drop the oldest element. + g.push_force(t); + drop(g); + let _ = sender.try_send(()); + } + Err(e) => log::error!("{}", e), + }), + receiver, + ) + } +} diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index ea212485ec..90b4b2af58 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -214,7 +214,7 @@ where ScoutBuilder { what: what.into(), config: config.try_into().map_err(|e| e.into()), - handler: DefaultHandler, + handler: DefaultHandler::default(), } } diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index 23e1846741..a28292fda2 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -155,7 +155,7 @@ impl<'a> Liveliness<'a> { LivelinessSubscriberBuilder { session: self.session.clone(), key_expr: TryIntoKeyExpr::try_into(key_expr).map_err(Into::into), - handler: DefaultHandler, + handler: DefaultHandler::default(), } } @@ -198,7 +198,7 @@ impl<'a> Liveliness<'a> { session: &self.session, key_expr, timeout, - handler: DefaultHandler, + handler: DefaultHandler::default(), } } } diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index f36e253636..e3d43993f3 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -518,7 +518,7 @@ impl<'a> Publisher<'a> { pub fn matching_listener(&self) -> MatchingListenerBuilder<'_, DefaultHandler> { MatchingListenerBuilder { publisher: PublisherRef::Borrow(self), - handler: DefaultHandler, + handler: DefaultHandler::default(), } } @@ -623,7 +623,7 @@ impl PublisherDeclarations for std::sync::Arc> { fn matching_listener(&self) -> MatchingListenerBuilder<'static, DefaultHandler> { MatchingListenerBuilder { publisher: PublisherRef::Shared(self.clone()), - handler: DefaultHandler, + handler: DefaultHandler::default(), } } } diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index ca5d44c3a6..3f1c382a66 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -302,7 +302,7 @@ impl<'s, 'a> SessionDeclarations<'s, 'a> for SessionRef<'a> { key_expr: TryIntoKeyExpr::try_into(key_expr).map_err(Into::into), reliability: Reliability::DEFAULT, origin: Locality::default(), - handler: DefaultHandler, + handler: DefaultHandler::default(), } } fn declare_queryable<'b, TryIntoKeyExpr>( @@ -318,7 +318,7 @@ impl<'s, 'a> SessionDeclarations<'s, 'a> for SessionRef<'a> { key_expr: key_expr.try_into().map_err(Into::into), complete: false, origin: Locality::default(), - handler: DefaultHandler, + handler: DefaultHandler::default(), } } fn declare_publisher<'b, TryIntoKeyExpr>( @@ -814,7 +814,7 @@ impl Session { value: None, #[cfg(feature = "unstable")] attachment: None, - handler: DefaultHandler, + handler: DefaultHandler::default(), #[cfg(feature = "unstable")] source_info: SourceInfo::empty(), } @@ -1865,7 +1865,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { key_expr: 
key_expr.try_into().map_err(Into::into), reliability: Reliability::DEFAULT, origin: Locality::default(), - handler: DefaultHandler, + handler: DefaultHandler::default(), } } @@ -1910,7 +1910,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { key_expr: key_expr.try_into().map_err(Into::into), complete: false, origin: Locality::default(), - handler: DefaultHandler, + handler: DefaultHandler::default(), } } diff --git a/zenoh/tests/handler.rs b/zenoh/tests/handler.rs index ceed15e2c3..57910bf3d6 100644 --- a/zenoh/tests/handler.rs +++ b/zenoh/tests/handler.rs @@ -14,12 +14,12 @@ #[test] fn pubsub_with_ringbuffer() { use std::{thread, time::Duration}; - use zenoh::{handlers::RingBuffer, prelude::sync::*}; + use zenoh::{handlers::RingChannel, prelude::sync::*}; let zenoh = zenoh::open(Config::default()).res().unwrap(); let sub = zenoh .declare_subscriber("test/ringbuffer") - .with(RingBuffer::new(3)) + .with(RingChannel::new(3)) .res() .unwrap(); for i in 0..10 { @@ -32,7 +32,6 @@ fn pubsub_with_ringbuffer() { for i in 7..10 { assert_eq!( sub.recv() - .unwrap() .unwrap() .payload() .deserialize::() @@ -46,12 +45,12 @@ fn pubsub_with_ringbuffer() { #[test] fn query_with_ringbuffer() { - use zenoh::{handlers::RingBuffer, prelude::sync::*}; + use zenoh::{handlers::RingChannel, prelude::sync::*}; let zenoh = zenoh::open(Config::default()).res().unwrap(); let queryable = zenoh .declare_queryable("test/ringbuffer_query") - .with(RingBuffer::new(1)) + .with(RingChannel::new(1)) .res() .unwrap(); @@ -66,7 +65,7 @@ fn query_with_ringbuffer() { .res() .unwrap(); - let query = queryable.recv().unwrap().unwrap(); + let query = queryable.recv().unwrap(); // Only receive the latest query assert_eq!( query From d6da7a852c48f464cd8d7ef2a2f962e382e36d63 Mon Sep 17 00:00:00 2001 From: DenisBiryukov91 <155981813+DenisBiryukov91@users.noreply.github.com> Date: Thu, 11 Apr 2024 20:10:09 +0200 Subject: [PATCH 205/357] Accessors for Value (#927) * accessors for Value * doctest fix * valgrind test fix --- .../src/queryable_get/bin/z_queryable_get.rs | 4 +- examples/examples/z_get.rs | 2 +- examples/examples/z_get_liveliness.rs | 2 +- examples/examples/z_queryable.rs | 2 +- plugins/zenoh-plugin-rest/src/lib.rs | 13 ++++--- .../src/replica/align_queryable.rs | 4 +- .../src/replica/aligner.rs | 7 +--- .../src/replica/storage.rs | 34 ++++++++--------- zenoh/src/liveliness.rs | 2 +- zenoh/src/net/runtime/adminspace.rs | 4 +- zenoh/src/query.rs | 16 ++++++-- zenoh/src/queryable.rs | 14 +++---- zenoh/src/sample/mod.rs | 4 +- zenoh/src/value.rs | 38 +++++++------------ zenoh/tests/attachments.rs | 4 +- zenoh/tests/handler.rs | 2 +- zenoh/tests/session.rs | 2 +- 17 files changed, 72 insertions(+), 82 deletions(-) diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs index ea0f16399c..84c3a82f88 100644 --- a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -34,7 +34,7 @@ async fn main() { query .reply( query.selector().key_expr, - query.value().unwrap().payload.clone(), + query.value().unwrap().payload().clone(), ) .res() .await @@ -71,7 +71,7 @@ async fn main() { ), Err(err) => println!( ">> Received (ERROR: '{}')", - err.payload + err.payload() .deserialize::() .unwrap_or_else(|e| format!("{}", e)) ), diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 486346a8ea..77b67b90ed 100644 --- a/examples/examples/z_get.rs +++ 
b/examples/examples/z_get.rs @@ -55,7 +55,7 @@ async fn main() { } Err(err) => { let payload = err - .payload + .payload() .deserialize::() .unwrap_or_else(|e| format!("{}", e)); println!(">> Received (ERROR: '{}')", payload); diff --git a/examples/examples/z_get_liveliness.rs b/examples/examples/z_get_liveliness.rs index 487f3c25d6..0a15b287c7 100644 --- a/examples/examples/z_get_liveliness.rs +++ b/examples/examples/z_get_liveliness.rs @@ -40,7 +40,7 @@ async fn main() { Ok(sample) => println!(">> Alive token ('{}')", sample.key_expr().as_str(),), Err(err) => { let payload = err - .payload + .payload() .deserialize::() .unwrap_or_else(|e| format!("{}", e)); println!(">> Received (ERROR: '{}')", payload); diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index 5113f1c2b7..5ef73d905b 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -44,7 +44,7 @@ async fn main() { None => println!(">> [Queryable ] Received Query '{}'", query.selector()), Some(value) => { let payload = value - .payload + .payload() .deserialize::() .unwrap_or_else(|e| format!("{}", e)); println!( diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 43c3f33776..6edcfdb945 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -97,8 +97,8 @@ fn result_to_json(sample: Result) -> JSONSample { Ok(sample) => sample_to_json(&sample), Err(err) => JSONSample { key: "ERROR".into(), - value: payload_to_json(&err.payload, &err.encoding), - encoding: err.encoding.to_string(), + value: payload_to_json(err.payload(), err.encoding()), + encoding: err.encoding().to_string(), time: None, }, } @@ -139,7 +139,7 @@ fn result_to_html(sample: Result) -> String { Err(err) => { format!( "

<dt>ERROR</dt>\n<dd>{}</dd>
\n", - err.payload.deserialize::>().unwrap_or_default() + err.payload().deserialize::>().unwrap_or_default() ) } } @@ -172,8 +172,11 @@ async fn to_raw_response(results: flume::Receiver) -> Response { ), Err(value) => response( StatusCode::Ok, - Cow::from(&value.encoding).as_ref(), - &value.payload.deserialize::>().unwrap_or_default(), + Cow::from(value.encoding()).as_ref(), + &value + .payload() + .deserialize::>() + .unwrap_or_default(), ), }, Err(_) => response(StatusCode::Ok, "", ""), diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 1ce6a1cb16..3a37095f67 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -126,8 +126,8 @@ impl AlignQueryable { } AlignData::Data(k, (v, ts)) => { query - .reply(k, v.payload) - .encoding(v.encoding) + .reply(k, v.payload().clone()) + .encoding(v.encoding().clone()) .timestamp(ts) .res() .await diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 64d5cfa1cd..f33b370200 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -106,11 +106,8 @@ impl Aligner { log::trace!("[ALIGNER] Received queried samples: {missing_data:?}"); for (key, (ts, value)) in missing_data { - let Value { - payload, encoding, .. - } = value; - let sample = SampleBuilder::put(key, payload) - .encoding(encoding) + let sample = SampleBuilder::put(key, value.payload().clone()) + .encoding(value.encoding().clone()) .timestamp(ts) .into(); log::debug!("[ALIGNER] Adding {:?} to storage", sample); diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 93075170ac..d2c2984c21 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -301,12 +301,12 @@ impl StorageService { { match update.kind { SampleKind::Put => { - SampleBuilder::put(KeyExpr::from(k.clone()), update.data.value.payload) - .encoding(update.data.value.encoding) + SampleBuilder::put(k.clone(), update.data.value.payload().clone()) + .encoding(update.data.value.encoding().clone()) .timestamp(update.data.timestamp) .into() } - SampleKind::Delete => SampleBuilder::delete(KeyExpr::from(k.clone())) + SampleKind::Delete => SampleBuilder::delete(k.clone()) .timestamp(update.data.timestamp) .into(), } @@ -329,8 +329,10 @@ impl StorageService { storage .put( stripped_key, - Value::new(sample_to_store.payload().clone()) - .encoding(sample_to_store.encoding().clone()), + Value::new( + sample_to_store.payload().clone(), + sample_to_store.encoding().clone(), + ), *sample_to_store.timestamp().unwrap(), ) .await @@ -514,8 +516,8 @@ impl StorageService { Ok(stored_data) => { for entry in stored_data { if let Err(e) = q - .reply(key.clone(), entry.value.payload) - .encoding(entry.value.encoding) + .reply(key.clone(), entry.value.payload().clone()) + .encoding(entry.value.encoding().clone()) .timestamp(entry.timestamp) .res() .await @@ -546,8 +548,8 @@ impl StorageService { Ok(stored_data) => { for entry in stored_data { if let Err(e) = q - .reply(q.key_expr().clone(), entry.value.payload) - .encoding(entry.value.encoding) + .reply(q.key_expr().clone(), entry.value.payload().clone()) + 
.encoding(entry.value.encoding().clone()) .timestamp(entry.timestamp) .res() .await @@ -665,20 +667,14 @@ impl StorageService { fn serialize_update(update: &Update) -> String { let Update { kind, - data: - StoredData { - value: Value { - payload, encoding, .. - }, - timestamp, - }, + data: StoredData { value, timestamp }, } = update; - let zbuf: ZBuf = payload.into(); + let zbuf: ZBuf = value.payload().into(); let result = ( kind.to_string(), timestamp.to_string(), - encoding.to_string(), + value.encoding().to_string(), zbuf.slices().collect::>(), ); serde_json::to_string_pretty(&result).unwrap() @@ -690,7 +686,7 @@ fn construct_update(data: String) -> Update { for slice in result.3 { payload.push_zslice(slice.to_vec().into()); } - let value = Value::new(payload).encoding(result.2); + let value = Value::new(payload, result.2); let data = StoredData { value, timestamp: Timestamp::from_str(&result.1).unwrap(), // @TODO: remove the unwrap() diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index a28292fda2..0b539ba636 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -608,7 +608,7 @@ where /// while let Ok(token) = tokens.recv_async().await { /// match token.sample { /// Ok(sample) => println!("Alive token ('{}')", sample.key_expr().as_str()), -/// Err(err) => println!("Received (ERROR: '{:?}')", err.payload), +/// Err(err) => println!("Received (ERROR: '{:?}')", err.payload()), /// } /// } /// # } diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 62c38b16ee..5b5b41b390 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -426,9 +426,7 @@ impl Primitives for AdminSpace { inner: Arc::new(QueryInner { key_expr: key_expr.clone(), parameters, - value: query - .ext_body - .map(|b| Value::from(b.payload).encoding(b.encoding)), + value: query.ext_body.map(|b| Value::new(b.payload, b.encoding)), qid: msg.id, zid, primitives, diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 7b8da9f768..d089290326 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -172,13 +172,21 @@ impl QoSBuilderTrait for GetBuilder<'_, '_, DefaultHandler> { impl ValueBuilderTrait for GetBuilder<'_, '_, Handler> { fn encoding>(self, encoding: T) -> Self { - let value = Some(self.value.unwrap_or_default().encoding(encoding)); - Self { value, ..self } + let mut value = self.value.unwrap_or_default(); + value.encoding = encoding.into(); + Self { + value: Some(value), + ..self + } } fn payload>(self, payload: T) -> Self { - let value = Some(self.value.unwrap_or_default().payload(payload)); - Self { value, ..self } + let mut value = self.value.unwrap_or_default(); + value.payload = payload.into(); + Self { + value: Some(value), + ..self + } } fn value>(self, value: T) -> Self { let value: Value = value.into(); diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index a6d87df5a4..7d36fe8f99 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -489,17 +489,15 @@ pub struct ReplyErrBuilder<'a> { impl ValueBuilderTrait for ReplyErrBuilder<'_> { fn encoding>(self, encoding: T) -> Self { - Self { - value: self.value.encoding(encoding), - ..self - } + let mut value = self.value.clone(); + value.encoding = encoding.into(); + Self { value, ..self } } fn payload>(self, payload: T) -> Self { - Self { - value: self.value.payload(payload), - ..self - } + let mut value = self.value.clone(); + value.payload = payload.into(); + Self { value, ..self } } fn value>(self, value: T) -> Self { diff 
--git a/zenoh/src/sample/mod.rs b/zenoh/src/sample/mod.rs index f4fb1e074a..7bb3fe9cde 100644 --- a/zenoh/src/sample/mod.rs +++ b/zenoh/src/sample/mod.rs @@ -16,7 +16,7 @@ use crate::encoding::Encoding; use crate::payload::Payload; use crate::prelude::{KeyExpr, Value}; -use crate::sample::builder::{QoSBuilderTrait, ValueBuilderTrait}; +use crate::sample::builder::QoSBuilderTrait; use crate::time::Timestamp; use crate::Priority; #[cfg(feature = "unstable")] @@ -378,7 +378,7 @@ impl Sample { impl From for Value { fn from(sample: Sample) -> Self { - Value::new(sample.payload).encoding(sample.encoding) + Value::new(sample.payload, sample.encoding) } } diff --git a/zenoh/src/value.rs b/zenoh/src/value.rs index 92a87cb6c5..d1b582111a 100644 --- a/zenoh/src/value.rs +++ b/zenoh/src/value.rs @@ -13,27 +13,26 @@ // //! Value primitives. -use crate::{encoding::Encoding, payload::Payload, sample::builder::ValueBuilderTrait}; +use crate::{encoding::Encoding, payload::Payload}; /// A zenoh [`Value`] contains a `payload` and an [`Encoding`] that indicates how the [`Payload`] should be interpreted. #[non_exhaustive] #[derive(Clone, Debug, PartialEq, Eq)] pub struct Value { - /// The binary [`Payload`] of this [`Value`]. - pub payload: Payload, - /// The [`Encoding`] of this [`Value`]. - pub encoding: Encoding, + pub(crate) payload: Payload, + pub(crate) encoding: Encoding, } impl Value { - /// Creates a new [`Value`] with default [`Encoding`]. - pub fn new(payload: T) -> Self + /// Creates a new [`Value`] with specified [`Payload`] and [`Encoding`]. + pub fn new(payload: T, encoding: E) -> Self where T: Into, + E: Into, { Value { payload: payload.into(), - encoding: Encoding::default(), + encoding: encoding.into(), } } /// Creates an empty [`Value`]. @@ -48,24 +47,15 @@ impl Value { pub fn is_empty(&self) -> bool { self.payload.is_empty() && self.encoding == Encoding::default() } -} -impl ValueBuilderTrait for Value { - fn encoding>(self, encoding: T) -> Self { - Self { - encoding: encoding.into(), - ..self - } + /// Gets binary [`Payload`] of this [`Value`]. + pub fn payload(&self) -> &Payload { + &self.payload } - fn payload>(self, payload: T) -> Self { - Self { - payload: payload.into(), - ..self - } - } - fn value>(self, value: T) -> Self { - let Value { payload, encoding } = value.into(); - Self { payload, encoding } + + /// Gets [`Encoding`] of this [`Value`]. 
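+    ///
+    /// Illustrative sketch (not part of the original patch); the payload
+    /// conversion and the `Encoding::TEXT_PLAIN` constant are assumptions:
+    ///
+    /// ```
+    /// use zenoh::prelude::*;
+    ///
+    /// let value = Value::new("hello", Encoding::TEXT_PLAIN);
+    /// assert_eq!(value.payload().len(), 5);
+    /// assert_eq!(value.encoding(), &Encoding::TEXT_PLAIN);
+    /// ```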
+ pub fn encoding(&self) -> &Encoding { + &self.encoding } } diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 2a58749701..844e2985bc 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -66,7 +66,7 @@ fn attachment_queries() { .callback(|query| { let s = query .value() - .map(|q| q.payload.deserialize::().unwrap()) + .map(|q| q.payload().deserialize::().unwrap()) .unwrap_or_default(); println!("Query value: {}", s); @@ -82,7 +82,7 @@ fn attachment_queries() { query .reply( query.key_expr().clone(), - query.value().unwrap().payload.clone(), + query.value().unwrap().payload().clone(), ) .attachment(Attachment::from_iter( attachment diff --git a/zenoh/tests/handler.rs b/zenoh/tests/handler.rs index 57910bf3d6..ad6648dc27 100644 --- a/zenoh/tests/handler.rs +++ b/zenoh/tests/handler.rs @@ -71,7 +71,7 @@ fn query_with_ringbuffer() { query .value() .unwrap() - .payload + .payload() .deserialize::() .unwrap(), "query2" diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 267bb5c284..603ebdac49 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -230,7 +230,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { let e = s.sample.unwrap_err(); - assert_eq!(e.payload.len(), size); + assert_eq!(e.payload().len(), size); cnt += 1; } } From de84b6fb56982791523a968c79ce73a3cc3fe20d Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 12 Apr 2024 14:23:04 +0200 Subject: [PATCH 206/357] Properties use Parameters internally --- Cargo.lock | 7 + commons/zenoh-collections/Cargo.toml | 2 + commons/zenoh-collections/src/lib.rs | 3 + commons/zenoh-collections/src/parameters.rs | 168 ++++++++++++++++ commons/zenoh-collections/src/properties.rs | 189 ++++++++++-------- commons/zenoh-protocol/Cargo.toml | 3 +- commons/zenoh-protocol/src/core/endpoint.rs | 147 ++------------ io/zenoh-links/zenoh-link-quic/Cargo.toml | 1 + io/zenoh-links/zenoh-link-quic/src/lib.rs | 9 +- io/zenoh-links/zenoh-link-tls/Cargo.toml | 1 + io/zenoh-links/zenoh-link-tls/src/lib.rs | 9 +- io/zenoh-links/zenoh-link-unixpipe/Cargo.toml | 1 + .../zenoh-link-unixpipe/src/unix/mod.rs | 6 +- io/zenoh-transport/src/multicast/manager.rs | 7 +- io/zenoh-transport/src/unicast/manager.rs | 11 +- zenoh/src/selector.rs | 2 + 16 files changed, 318 insertions(+), 248 deletions(-) create mode 100644 commons/zenoh-collections/src/parameters.rs diff --git a/Cargo.lock b/Cargo.lock index 09e598d878..07e166c57c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4553,6 +4553,9 @@ dependencies = [ [[package]] name = "zenoh-collections" version = "0.11.0-dev" +dependencies = [ + "rand 0.8.5", +] [[package]] name = "zenoh-config" @@ -4717,6 +4720,7 @@ dependencies = [ "tokio", "tokio-rustls 0.24.1", "tokio-util", + "zenoh-collections", "zenoh-config", "zenoh-core", "zenoh-link-commons", @@ -4782,6 +4786,7 @@ dependencies = [ "tokio-rustls 0.25.0", "tokio-util", "webpki-roots", + "zenoh-collections", "zenoh-config", "zenoh-core", "zenoh-link-commons", @@ -4826,6 +4831,7 @@ dependencies = [ "tokio-util", "unix-named-pipe", "zenoh-buffers", + "zenoh-collections", "zenoh-config", "zenoh-core", "zenoh-link-commons", @@ -5005,6 +5011,7 @@ dependencies = [ "serde", "uhlc", "zenoh-buffers", + "zenoh-collections", "zenoh-keyexpr", "zenoh-result", ] diff --git a/commons/zenoh-collections/Cargo.toml b/commons/zenoh-collections/Cargo.toml index 
ca01d7460e..27787e8c6a 100644 --- a/commons/zenoh-collections/Cargo.toml +++ b/commons/zenoh-collections/Cargo.toml @@ -31,5 +31,7 @@ description = "Internal crate for zenoh." [features] default = ["std"] std = [] +test = ["rand"] [dependencies] +rand = { workspace = true, optional = true } diff --git a/commons/zenoh-collections/src/lib.rs b/commons/zenoh-collections/src/lib.rs index ea9a9209e6..6690d372da 100644 --- a/commons/zenoh-collections/src/lib.rs +++ b/commons/zenoh-collections/src/lib.rs @@ -20,6 +20,9 @@ #![cfg_attr(not(feature = "std"), no_std)] extern crate alloc; +pub mod parameters; +pub use parameters::*; + pub mod single_or_vec; pub use single_or_vec::*; diff --git a/commons/zenoh-collections/src/parameters.rs b/commons/zenoh-collections/src/parameters.rs new file mode 100644 index 0000000000..6c34f6502d --- /dev/null +++ b/commons/zenoh-collections/src/parameters.rs @@ -0,0 +1,168 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +pub const LIST_SEPARATOR: char = ';'; +pub const FIELD_SEPARATOR: char = '='; +pub const VALUE_SEPARATOR: char = '|'; + +fn split_once(s: &str, c: char) -> (&str, &str) { + match s.find(c) { + Some(index) => { + let (l, r) = s.split_at(index); + (l, &r[1..]) + } + None => (s, ""), + } +} + +// tcp/localhost:7557?mymetadata=asdasd#myconfig=asdasd;asdasd=1;asdijabdiasd=1a + +/// Parameters provides an `HashMap<&str, &str>`-like view over a `&str` when `&str` follows the format `a=b;c=d|e;f=g` +pub struct Parameters; + +impl Parameters { + pub fn iter(s: &str) -> impl DoubleEndedIterator { + s.split(LIST_SEPARATOR).filter_map(|prop| { + if prop.is_empty() { + None + } else { + Some(split_once(prop, FIELD_SEPARATOR)) + } + }) + } + + #[allow(clippy::should_implement_trait)] + pub fn from_iter<'s, I>(iter: I) -> String + where + I: Iterator, + { + let mut into = String::new(); + Self::from_iter_into(iter, &mut into); + into + } + + pub fn from_iter_into<'s, I>(iter: I, into: &mut String) + where + I: Iterator, + { + let mut from = iter.collect::>(); + from.sort_by(|(k1, _), (k2, _)| k1.cmp(k2)); + Self::extend_into(from.iter().copied(), into); + } + + pub fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> { + Self::iter(s).find(|x| x.0 == k).map(|x| x.1) + } + + pub fn values<'s>(s: &'s str, k: &str) -> impl DoubleEndedIterator { + match Self::get(s, k) { + Some(v) => v.split(VALUE_SEPARATOR), + None => { + let mut i = "".split(VALUE_SEPARATOR); + i.next(); + i + } + } + } + + pub fn insert<'s, I>(mut iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) + where + I: Iterator, + { + let item = iter.find(|(key, _)| *key == k).map(|(_, v)| v); + + let current = iter.filter(|x| x.0 != k); + let new = Some((k, v)).into_iter(); + let iter = current.chain(new); + (Parameters::concat(iter), item) + } + + pub fn remove<'s, I>(mut iter: I, k: &'s str) -> (String, Option<&'s str>) + where + I: Iterator, + { + let item = iter.find(|(key, _)| *key == k).map(|(_, v)| v); + let iter = iter.filter(|x| x.0 != k); + (Parameters::concat(iter), item) + } + + pub fn concat<'s, I>(iter: I) -> String + where + I: Iterator, + { + let mut into = String::new(); + 
Parameters::extend_into(iter, &mut into); + into + } + + pub fn extend_into<'s, I>(iter: I, into: &mut String) + where + I: Iterator, + { + let mut first = into.is_empty(); + for (k, v) in iter { + if !first { + into.push(LIST_SEPARATOR); + } + into.push_str(k); + if !v.is_empty() { + into.push(FIELD_SEPARATOR); + into.push_str(v); + } + first = false; + } + } + + pub fn is_sorted<'s, I>(iter: I) -> bool + where + I: Iterator, + { + let mut prev = None; + for (k, _) in iter { + match prev.take() { + Some(p) if k < p => return false, + _ => prev = Some(k), + } + } + true + } + + #[cfg(feature = "test")] + pub fn rand(into: &mut String) { + use rand::{ + distributions::{Alphanumeric, DistString}, + Rng, + }; + + const MIN: usize = 2; + const MAX: usize = 8; + + let mut rng = rand::thread_rng(); + + let num = rng.gen_range(MIN..MAX); + for i in 0..num { + if i != 0 { + into.push(LIST_SEPARATOR); + } + let len = rng.gen_range(MIN..MAX); + let key = Alphanumeric.sample_string(&mut rng, len); + into.push_str(key.as_str()); + + into.push(FIELD_SEPARATOR); + + let len = rng.gen_range(MIN..MAX); + let value = Alphanumeric.sample_string(&mut rng, len); + into.push_str(value.as_str()); + } + } +} diff --git a/commons/zenoh-collections/src/properties.rs b/commons/zenoh-collections/src/properties.rs index 281ac8ca68..030eca7d53 100644 --- a/commons/zenoh-collections/src/properties.rs +++ b/commons/zenoh-collections/src/properties.rs @@ -1,3 +1,6 @@ +use alloc::borrow::Cow; +use core::borrow::Borrow; + // // Copyright (c) 2022 ZettaScale Technology // @@ -11,128 +14,142 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::{ - collections::HashMap, - fmt, - ops::{Deref, DerefMut}, -}; - -const PROP_SEPS: &[&str] = &["\r\n", "\n", ";"]; -const DEFAULT_PROP_SEP: char = ';'; -const KV_SEP: char = '='; -const COMMENT_PREFIX: char = '#'; +use crate::Parameters; +use std::{collections::HashMap, fmt}; /// A map of key/value (String,String) properties. /// It can be parsed from a String, using `;` or `` as separator between each properties /// and `=` as separator between a key and its value. Keys and values are trimed. #[non_exhaustive] #[derive(Clone, PartialEq, Eq, Default)] -pub struct Properties(HashMap); - -impl Deref for Properties { - type Target = HashMap; +pub struct Properties<'s>(Cow<'s, str>); - fn deref(&self) -> &Self::Target { +impl Properties<'_> { + pub fn as_str(&self) -> &str { &self.0 } -} -impl DerefMut for Properties { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 + pub fn get(&self, k: K) -> Option<&str> + where + K: Borrow, + { + Parameters::get(self.as_str(), k.borrow()) + } + + pub fn values(&self, k: K) -> impl DoubleEndedIterator + where + K: Borrow, + { + Parameters::values(self.as_str(), k.borrow()) + } + + pub fn iter(&self) -> impl DoubleEndedIterator { + Parameters::iter(self.as_str()) + } + + pub fn insert(&mut self, k: K, v: V) -> Option + where + K: Borrow, + V: Borrow, + { + let (inner, removed) = Parameters::insert(self.iter(), k.borrow(), v.borrow()); + let removed = removed.map(|s| s.to_string()); + self.0 = Cow::Owned(inner); + removed + } + + pub fn remove(&mut self, k: K) -> Option + where + K: Borrow, + { + let (inner, removed) = Parameters::remove(self.iter(), k.borrow()); + let removed = removed.map(|s| s.to_string()); + self.0 = Cow::Owned(inner); + removed } } -impl fmt::Display for Properties { - /// Format the Properties as a string, using `'='` for key/value separator - /// and `';'` for separator between each keys/values. 
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut it = self.0.iter(); - if let Some((k, v)) = it.next() { - if v.is_empty() { - write!(f, "{k}")? - } else { - write!(f, "{k}{KV_SEP}{v}")? - } - for (k, v) in it { - if v.is_empty() { - write!(f, "{DEFAULT_PROP_SEP}{k}")? - } else { - write!(f, "{DEFAULT_PROP_SEP}{k}{KV_SEP}{v}")? - } - } +impl<'s> From<&'s str> for Properties<'s> { + fn from(value: &'s str) -> Self { + if Parameters::is_sorted(Parameters::iter(value)) { + Self(Cow::Borrowed(value)) + } else { + Self(Cow::Owned(Parameters::from_iter(Parameters::iter(value)))) } - Ok(()) } } -impl fmt::Debug for Properties { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{self}") +impl From for Properties<'_> { + fn from(value: String) -> Self { + if Parameters::is_sorted(Parameters::iter(value.as_str())) { + Self(Cow::Owned(value)) + } else { + Self(Cow::Owned(Parameters::from_iter(Parameters::iter( + value.as_str(), + )))) + } } } -impl From<&str> for Properties { - fn from(s: &str) -> Self { - let mut props = vec![s]; - for sep in PROP_SEPS { - props = props - .into_iter() - .flat_map(|s| s.split(sep)) - .collect::>(); - } - props = props.into_iter().map(str::trim).collect::>(); - let inner = props - .iter() - .filter_map(|prop| { - if prop.is_empty() || prop.starts_with(COMMENT_PREFIX) { - None - } else { - let mut it = prop.splitn(2, KV_SEP); - Some(( - it.next().unwrap().trim().to_string(), - it.next().unwrap_or("").trim().to_string(), - )) - } - }) - .collect(); - Self(inner) +impl<'s, K, V> FromIterator<(&'s K, &'s V)> for Properties<'_> +where + K: AsRef + 's, + V: AsRef + 's, +{ + fn from_iter>(iter: T) -> Self { + let inner = Parameters::from_iter(iter.into_iter().map(|(k, v)| (k.as_ref(), v.as_ref()))); + Self(Cow::Owned(inner)) } } -impl From for Properties { - fn from(s: String) -> Self { - Self::from(s.as_str()) +impl<'s, K, V> FromIterator<&'s (K, V)> for Properties<'_> +where + K: AsRef + 's, + V: AsRef + 's, +{ + fn from_iter>(iter: T) -> Self { + let inner = Parameters::from_iter(iter.into_iter().map(|(k, v)| (k.as_ref(), v.as_ref()))); + Self(Cow::Owned(inner)) } } -impl From> for Properties { - fn from(map: HashMap) -> Self { - Self(map) +impl<'s, K, V> From<&'s [(K, V)]> for Properties<'_> +where + K: AsRef + 's, + V: AsRef + 's, +{ + fn from(value: &'s [(K, V)]) -> Self { + Self::from_iter(value.iter()) } } -impl From<&[(&str, &str)]> for Properties { - fn from(kvs: &[(&str, &str)]) -> Self { - let inner = kvs - .iter() - .map(|(k, v)| ((*k).to_string(), (*v).to_string())) - .collect(); - Self(inner) +impl From> for Properties<'_> +where + K: AsRef, + V: AsRef, +{ + fn from(map: HashMap) -> Self { + Self::from_iter(map.iter()) } } -impl TryFrom<&std::path::Path> for Properties { - type Error = std::io::Error; +impl From> for HashMap { + fn from(props: Properties) -> Self { + HashMap::from_iter( + Parameters::iter(props.as_str()).map(|(k, v)| (k.to_string(), v.to_string())), + ) + } +} - fn try_from(p: &std::path::Path) -> Result { - Ok(Self::from(std::fs::read_to_string(p)?)) +impl fmt::Display for Properties<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.0) } } -impl From for HashMap { - fn from(props: Properties) -> Self { - props.0 +impl fmt::Debug for Properties<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self) } } diff --git a/commons/zenoh-protocol/Cargo.toml b/commons/zenoh-protocol/Cargo.toml index 9d7e35d690..2c3a36b7a7 100644 --- 
a/commons/zenoh-protocol/Cargo.toml +++ b/commons/zenoh-protocol/Cargo.toml @@ -33,7 +33,7 @@ std = [ "zenoh-keyexpr/std", "zenoh-result/std", ] -test = ["rand", "zenoh-buffers/test"] +test = ["rand", "zenoh-buffers/test", "zenoh-collections/test"] shared-memory = ["std", "zenoh-buffers/shared-memory"] stats = [] @@ -43,6 +43,7 @@ rand = { workspace = true, features = ["alloc", "getrandom"], optional = true } serde = { workspace = true, features = ["alloc"] } uhlc = { workspace = true, default-features = false } zenoh-buffers = { workspace = true, default-features = false } +zenoh-collections = { workspace = true, default-features = false } zenoh-keyexpr = { workspace = true } zenoh-result = { workspace = true } diff --git a/commons/zenoh-protocol/src/core/endpoint.rs b/commons/zenoh-protocol/src/core/endpoint.rs index a8fcb3ae98..898ee615e6 100644 --- a/commons/zenoh-protocol/src/core/endpoint.rs +++ b/commons/zenoh-protocol/src/core/endpoint.rs @@ -12,27 +12,15 @@ // ZettaScale Zenoh Team, // use super::locator::*; -use alloc::{borrow::ToOwned, format, string::String, vec::Vec}; +use alloc::{borrow::ToOwned, format, string::String}; use core::{convert::TryFrom, fmt, str::FromStr}; +use zenoh_collections::Parameters; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; // Parsing chars pub const PROTO_SEPARATOR: char = '/'; pub const METADATA_SEPARATOR: char = '?'; -pub const LIST_SEPARATOR: char = ';'; -pub const FIELD_SEPARATOR: char = '='; pub const CONFIG_SEPARATOR: char = '#'; -pub const VALUE_SEPARATOR: char = '|'; - -fn split_once(s: &str, c: char) -> (&str, &str) { - match s.find(c) { - Some(index) => { - let (l, r) = s.split_at(index); - (l, &r[1..]) - } - None => (s, ""), - } -} // Parsing functions pub(super) fn protocol(s: &str) -> &str { @@ -64,77 +52,6 @@ pub(super) fn config(s: &str) -> &str { } } -pub struct Parameters; - -impl Parameters { - pub fn extend<'s, I>(iter: I, into: &mut String) - where - I: Iterator, - { - let mut first = into.is_empty(); - for (k, v) in iter { - if !first { - into.push(LIST_SEPARATOR); - } - into.push_str(k); - if !v.is_empty() { - into.push(FIELD_SEPARATOR); - into.push_str(v); - } - first = false; - } - } - - pub fn iter(s: &str) -> impl DoubleEndedIterator { - s.split(LIST_SEPARATOR).filter_map(|prop| { - if prop.is_empty() { - None - } else { - Some(split_once(prop, FIELD_SEPARATOR)) - } - }) - } - - pub fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> { - Self::iter(s).find(|x| x.0 == k).map(|x| x.1) - } - - pub fn values<'s>(s: &'s str, k: &str) -> impl DoubleEndedIterator { - match Self::get(s, k) { - Some(v) => v.split(VALUE_SEPARATOR), - None => { - let mut i = "".split(VALUE_SEPARATOR); - i.next(); - i - } - } - } - - pub(super) fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> String - where - I: Iterator, - { - let current = iter.filter(|x| x.0 != k); - let new = Some((k, v)).into_iter(); - let iter = current.chain(new); - - let mut into = String::new(); - Parameters::extend(iter, &mut into); - into - } - - pub(super) fn remove<'s, I>(iter: I, k: &'s str) -> String - where - I: Iterator, - { - let iter = iter.filter(|x| x.0 != k); - - let mut into = String::new(); - Parameters::extend(iter, &mut into); - into - } -} - // Protocol #[repr(transparent)] #[derive(Copy, Clone, PartialEq, Eq, Hash)] @@ -341,7 +258,7 @@ impl MetadataMut<'_> { let ep = EndPoint::new( self.0.protocol(), self.0.address(), - Parameters::insert(self.0.metadata().iter(), k, v), + Parameters::insert(self.0.metadata().iter(), k, v).0, 
self.0.config(), )?; @@ -353,7 +270,7 @@ impl MetadataMut<'_> { let ep = EndPoint::new( self.0.protocol(), self.0.address(), - Parameters::remove(self.0.metadata().iter(), k), + Parameters::remove(self.0.metadata().iter(), k).0, self.0.config(), )?; @@ -459,7 +376,7 @@ impl ConfigMut<'_> { self.0.protocol(), self.0.address(), self.0.metadata(), - Parameters::insert(self.0.config().iter(), k, v), + Parameters::insert(self.0.config().iter(), k, v).0, )?; self.0.inner = ep.inner; @@ -471,7 +388,7 @@ impl ConfigMut<'_> { self.0.protocol(), self.0.address(), self.0.metadata(), - Parameters::remove(self.0.config().iter(), k), + Parameters::remove(self.0.config().iter(), k).0, )?; self.0.inner = ep.inner; @@ -621,27 +538,6 @@ impl TryFrom for EndPoint { const ERR: &str = "Endpoints must be of the form /
[?][#]"; - fn sort_hashmap(from: &str, into: &mut String) { - let mut from = from - .split(LIST_SEPARATOR) - .map(|p| split_once(p, FIELD_SEPARATOR)) - .collect::>(); - from.sort_by(|(k1, _), (k2, _)| k1.cmp(k2)); - - let mut first = true; - for (k, v) in from.iter() { - if !first { - into.push(LIST_SEPARATOR); - } - into.push_str(k); - if !v.is_empty() { - into.push(FIELD_SEPARATOR); - into.push_str(v); - } - first = false; - } - } - let pidx = s .find(PROTO_SEPARATOR) .and_then(|i| (!s[..i].is_empty() && !s[i + 1..].is_empty()).then_some(i)) @@ -654,14 +550,14 @@ impl TryFrom for EndPoint { (Some(midx), None) if midx > pidx && !s[midx + 1..].is_empty() => { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..midx + 1]); // Includes metadata separator - sort_hashmap(&s[midx + 1..], &mut inner); + Parameters::from_iter_into(Parameters::iter(&s[midx + 1..]), &mut inner); Ok(EndPoint { inner }) } // There is some config (None, Some(cidx)) if cidx > pidx && !s[cidx + 1..].is_empty() => { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..cidx + 1]); // Includes config separator - sort_hashmap(&s[cidx + 1..], &mut inner); + Parameters::from_iter_into(Parameters::iter(&s[cidx + 1..]), &mut inner); Ok(EndPoint { inner }) } // There is some metadata and some config @@ -674,10 +570,10 @@ impl TryFrom for EndPoint { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..midx + 1]); // Includes metadata separator - sort_hashmap(&s[midx + 1..cidx], &mut inner); + Parameters::from_iter_into(Parameters::iter(&s[midx + 1..cidx]), &mut inner); inner.push(CONFIG_SEPARATOR); - sort_hashmap(&s[cidx + 1..], &mut inner); + Parameters::from_iter_into(Parameters::iter(&s[cidx + 1..]), &mut inner); Ok(EndPoint { inner }) } @@ -699,31 +595,12 @@ impl EndPoint { pub fn rand() -> Self { use rand::{ distributions::{Alphanumeric, DistString}, - rngs::ThreadRng, Rng, }; const MIN: usize = 2; const MAX: usize = 8; - fn gen_hashmap(rng: &mut ThreadRng, endpoint: &mut String) { - let num = rng.gen_range(MIN..MAX); - for i in 0..num { - if i != 0 { - endpoint.push(LIST_SEPARATOR); - } - let len = rng.gen_range(MIN..MAX); - let key = Alphanumeric.sample_string(rng, len); - endpoint.push_str(key.as_str()); - - endpoint.push(FIELD_SEPARATOR); - - let len = rng.gen_range(MIN..MAX); - let value = Alphanumeric.sample_string(rng, len); - endpoint.push_str(value.as_str()); - } - } - let mut rng = rand::thread_rng(); let mut endpoint = String::new(); @@ -739,11 +616,11 @@ impl EndPoint { if rng.gen_bool(0.5) { endpoint.push(METADATA_SEPARATOR); - gen_hashmap(&mut rng, &mut endpoint); + Parameters::rand(&mut endpoint); } if rng.gen_bool(0.5) { endpoint.push(CONFIG_SEPARATOR); - gen_hashmap(&mut rng, &mut endpoint); + Parameters::rand(&mut endpoint); } endpoint.parse().unwrap() diff --git a/io/zenoh-links/zenoh-link-quic/Cargo.toml b/io/zenoh-links/zenoh-link-quic/Cargo.toml index 496830b5ef..421db99e25 100644 --- a/io/zenoh-links/zenoh-link-quic/Cargo.toml +++ b/io/zenoh-links/zenoh-link-quic/Cargo.toml @@ -36,6 +36,7 @@ rustls-webpki = { workspace = true } secrecy = {workspace = true } tokio = { workspace = true, features = ["io-util", "net", "fs", "sync", "time"] } tokio-util = { workspace = true, features = ["rt"] } +zenoh-collections = { workspace = true } zenoh-config = { workspace = true } zenoh-core = { workspace = true } zenoh-link-commons = { workspace = true } diff --git a/io/zenoh-links/zenoh-link-quic/src/lib.rs b/io/zenoh-links/zenoh-link-quic/src/lib.rs index 
4bcabaf5b6..7f5e2a1587 100644 --- a/io/zenoh-links/zenoh-link-quic/src/lib.rs +++ b/io/zenoh-links/zenoh-link-quic/src/lib.rs @@ -25,14 +25,12 @@ use config::{ }; use secrecy::ExposeSecret; use std::net::SocketAddr; +use zenoh_collections::Parameters; use zenoh_config::Config; use zenoh_core::zconfigurable; use zenoh_link_commons::{ConfigurationInspector, LocatorInspector}; use zenoh_protocol::{ - core::{ - endpoint::{Address, Parameters}, - Locator, - }, + core::{endpoint::Address, Locator}, transport::BatchSize, }; use zenoh_result::{bail, zerror, ZResult}; @@ -131,8 +129,7 @@ impl ConfigurationInspector for QuicConfigurator { }; } - let mut s = String::new(); - Parameters::extend(ps.drain(..), &mut s); + let s = Parameters::from_iter(ps.drain(..)); Ok(s) } diff --git a/io/zenoh-links/zenoh-link-tls/Cargo.toml b/io/zenoh-links/zenoh-link-tls/Cargo.toml index 975fa49467..d164476e22 100644 --- a/io/zenoh-links/zenoh-link-tls/Cargo.toml +++ b/io/zenoh-links/zenoh-link-tls/Cargo.toml @@ -38,6 +38,7 @@ tokio = { workspace = true, features = ["io-util", "net", "fs", "sync"] } tokio-rustls = { workspace = true } tokio-util = { workspace = true, features = ["rt"] } webpki-roots = { workspace = true } +zenoh-collections = { workspace = true } zenoh-config = { workspace = true } zenoh-core = { workspace = true } zenoh-link-commons = { workspace = true } diff --git a/io/zenoh-links/zenoh-link-tls/src/lib.rs b/io/zenoh-links/zenoh-link-tls/src/lib.rs index 7faebb4cd9..dae8227cad 100644 --- a/io/zenoh-links/zenoh-link-tls/src/lib.rs +++ b/io/zenoh-links/zenoh-link-tls/src/lib.rs @@ -27,14 +27,12 @@ use config::{ use rustls_pki_types::ServerName; use secrecy::ExposeSecret; use std::{convert::TryFrom, net::SocketAddr}; +use zenoh_collections::Parameters; use zenoh_config::Config; use zenoh_core::zconfigurable; use zenoh_link_commons::{ConfigurationInspector, LocatorInspector}; use zenoh_protocol::{ - core::{ - endpoint::{self, Address}, - Locator, - }, + core::{endpoint::Address, Locator}, transport::BatchSize, }; use zenoh_result::{bail, zerror, ZResult}; @@ -166,8 +164,7 @@ impl ConfigurationInspector for TlsConfigurator { }; } - let mut s = String::new(); - endpoint::Parameters::extend(ps.drain(..), &mut s); + let s = Parameters::from_iter(ps.drain(..)); Ok(s) } diff --git a/io/zenoh-links/zenoh-link-unixpipe/Cargo.toml b/io/zenoh-links/zenoh-link-unixpipe/Cargo.toml index 66784728f9..84e083caf8 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/Cargo.toml +++ b/io/zenoh-links/zenoh-link-unixpipe/Cargo.toml @@ -32,6 +32,7 @@ async-trait = { workspace = true } log = { workspace = true } rand = { workspace = true, features = ["default"] } zenoh-buffers = { workspace = true } +zenoh-collections = { workspace = true } zenoh-core = { workspace = true } zenoh-config = { workspace = true } zenoh-link-commons = { workspace = true } diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs index bcafaaba3c..70d3d4dddc 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs +++ b/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs @@ -21,10 +21,11 @@ pub mod unicast; use async_trait::async_trait; pub use unicast::*; +use zenoh_collections::Parameters; use zenoh_config::Config; use zenoh_core::zconfigurable; use zenoh_link_commons::{ConfigurationInspector, LocatorInspector}; -use zenoh_protocol::core::{Locator, Parameters}; +use zenoh_protocol::core::Locator; use zenoh_result::ZResult; pub const UNIXPIPE_LOCATOR_PREFIX: &str = "unixpipe"; @@ -56,8 
+57,7 @@ impl ConfigurationInspector for UnixPipeConfigurator { properties.push((config::FILE_ACCESS_MASK, &file_access_mask_)); } - let mut s = String::new(); - Parameters::extend(properties.drain(..), &mut s); + let s = Parameters::from_iter(properties.drain(..)); Ok(s) } diff --git a/io/zenoh-transport/src/multicast/manager.rs b/io/zenoh-transport/src/multicast/manager.rs index b9b594205f..daf49ce9a3 100644 --- a/io/zenoh-transport/src/multicast/manager.rs +++ b/io/zenoh-transport/src/multicast/manager.rs @@ -19,6 +19,7 @@ use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; use tokio::sync::Mutex; +use zenoh_collections::Parameters; #[cfg(feature = "transport_compression")] use zenoh_config::CompressionMulticastConf; #[cfg(feature = "shared-memory")] @@ -27,7 +28,7 @@ use zenoh_config::{Config, LinkTxConf}; use zenoh_core::zasynclock; use zenoh_link::*; use zenoh_protocol::core::ZenohId; -use zenoh_protocol::{core::endpoint, transport::close}; +use zenoh_protocol::transport::close; use zenoh_result::{bail, zerror, ZResult}; pub struct TransportManagerConfigMulticast { @@ -259,9 +260,7 @@ impl TransportManager { .await?; // Fill and merge the endpoint configuration if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { - endpoint - .config_mut() - .extend(endpoint::Parameters::iter(config))?; + endpoint.config_mut().extend(Parameters::iter(config))?; } // Open the link diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index 8a63f4f630..eb0339c35b 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -36,6 +36,7 @@ use std::{ time::Duration, }; use tokio::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; +use zenoh_collections::Parameters; #[cfg(feature = "transport_compression")] use zenoh_config::CompressionUnicastConf; #[cfg(feature = "shared-memory")] @@ -45,7 +46,7 @@ use zenoh_core::{zasynclock, zcondfeat}; use zenoh_crypto::PseudoRng; use zenoh_link::*; use zenoh_protocol::{ - core::{endpoint, ZenohId}, + core::ZenohId, transport::{close, TransportSn}, }; use zenoh_result::{bail, zerror, ZResult}; @@ -379,9 +380,7 @@ impl TransportManager { .await?; // Fill and merge the endpoint configuration if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { - endpoint - .config_mut() - .extend(endpoint::Parameters::iter(config))?; + endpoint.config_mut().extend(Parameters::iter(config))?; }; manager.new_listener(endpoint).await } @@ -690,9 +689,7 @@ impl TransportManager { .await?; // Fill and merge the endpoint configuration if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { - endpoint - .config_mut() - .extend(endpoint::Parameters::iter(config))?; + endpoint.config_mut().extend(Parameters::iter(config))?; }; // Create a new link associated by calling the Link Manager diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 2a9a38c02c..a9e8941b33 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -85,6 +85,7 @@ impl<'a> Selector<'a> { { self.decode_into_map() } + /// Extracts the selector parameters' name-value pairs into a hashmap, returning an error in case of duplicated parameters. 
pub fn parameters_cowmap(&'a self) -> ZResult, Cow<'a, str>>> { self.decode_into_map() @@ -185,6 +186,7 @@ impl<'a> Selector<'a> { selector.drain(splice_start..(splice_end + (splice_end != selector.len()) as usize)); } } + #[cfg(any(feature = "unstable", test))] pub(crate) fn parameter_index(&self, param_name: &str) -> ZResult> { let starts_with_param = |s: &str| { From 434c719f3a75a06a6252e3fa69737e10fbb8a936 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 12 Apr 2024 18:49:46 +0200 Subject: [PATCH 207/357] Rework Selector to use Properties --- commons/zenoh-collections/src/parameters.rs | 78 +-- commons/zenoh-collections/src/properties.rs | 76 ++- commons/zenoh-protocol/src/core/endpoint.rs | 4 +- commons/zenoh-util/src/std_only/time_range.rs | 13 + plugins/zenoh-plugin-rest/src/lib.rs | 4 +- .../src/replica/align_queryable.rs | 10 +- .../src/replica/storage.rs | 4 +- zenoh/src/net/runtime/adminspace.rs | 17 +- zenoh/src/prelude.rs | 2 +- zenoh/src/query.rs | 7 +- zenoh/src/queryable.rs | 11 +- zenoh/src/selector.rs | 471 ++++++------------ zenoh/src/session.rs | 19 +- zenoh/tests/session.rs | 2 +- 14 files changed, 323 insertions(+), 395 deletions(-) diff --git a/commons/zenoh-collections/src/parameters.rs b/commons/zenoh-collections/src/parameters.rs index 6c34f6502d..536f2beb2a 100644 --- a/commons/zenoh-collections/src/parameters.rs +++ b/commons/zenoh-collections/src/parameters.rs @@ -25,13 +25,11 @@ fn split_once(s: &str, c: char) -> (&str, &str) { } } -// tcp/localhost:7557?mymetadata=asdasd#myconfig=asdasd;asdasd=1;asdijabdiasd=1a - -/// Parameters provides an `HashMap<&str, &str>`-like view over a `&str` when `&str` follows the format `a=b;c=d|e;f=g` +/// Parameters provides an `HashMap<&str, &str>`-like view over a `&str` when `&str` follows the format `a=b;c=d|e;f=g`. 
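+///
+/// Illustrative sketch (not part of the original patch) of the view functions
+/// defined below:
+///
+/// ```
+/// use zenoh_collections::Parameters;
+///
+/// let s = "a=1;b=2;c=x|y";
+/// assert_eq!(Parameters::get(s, "a"), Some("1"));
+/// assert_eq!(Parameters::get(s, "z"), None);
+/// let values: Vec<&str> = Parameters::values(s, "c").collect();
+/// assert_eq!(values, vec!["x", "y"]);
+/// ```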
pub struct Parameters; impl Parameters { - pub fn iter(s: &str) -> impl DoubleEndedIterator { + pub fn iter(s: &str) -> impl DoubleEndedIterator + Clone { s.split(LIST_SEPARATOR).filter_map(|prop| { if prop.is_empty() { None @@ -55,13 +53,17 @@ impl Parameters { where I: Iterator, { - let mut from = iter.collect::>(); + let mut from = iter + .filter(|(k, _)| !k.is_empty()) + .collect::>(); from.sort_by(|(k1, _), (k2, _)| k1.cmp(k2)); - Self::extend_into(from.iter().copied(), into); + Self::concat_into(from.iter().copied(), into); } pub fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> { - Self::iter(s).find(|x| x.0 == k).map(|x| x.1) + Self::iter(s) + .find(|(key, _)| *key == k) + .map(|(_, value)| value) } pub fn values<'s>(s: &'s str, k: &str) -> impl DoubleEndedIterator { @@ -75,16 +77,17 @@ impl Parameters { } } - pub fn insert<'s, I>(mut iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) + pub fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) where - I: Iterator, + I: Iterator + Clone, { - let item = iter.find(|(key, _)| *key == k).map(|(_, v)| v); + let mut ic = iter.clone(); + let item = ic.find(|(key, _)| *key == k).map(|(_, v)| v); let current = iter.filter(|x| x.0 != k); let new = Some((k, v)).into_iter(); let iter = current.chain(new); - (Parameters::concat(iter), item) + (Parameters::from_iter(iter), item) } pub fn remove<'s, I>(mut iter: I, k: &'s str) -> (String, Option<&'s str>) @@ -96,31 +99,23 @@ impl Parameters { (Parameters::concat(iter), item) } - pub fn concat<'s, I>(iter: I) -> String + pub fn extend<'s, C, N>(current: C, new: N) -> String where - I: Iterator, + C: Iterator, + N: Iterator, { let mut into = String::new(); - Parameters::extend_into(iter, &mut into); + Parameters::extend_into(current, new, &mut into); into } - pub fn extend_into<'s, I>(iter: I, into: &mut String) + pub fn extend_into<'s, C, N>(current: C, new: N, into: &mut String) where - I: Iterator, + C: Iterator, + N: Iterator, { - let mut first = into.is_empty(); - for (k, v) in iter { - if !first { - into.push(LIST_SEPARATOR); - } - into.push_str(k); - if !v.is_empty() { - into.push(FIELD_SEPARATOR); - into.push_str(v); - } - first = false; - } + let iter = current.chain(new); + Parameters::from_iter_into(iter, into); } pub fn is_sorted<'s, I>(iter: I) -> bool @@ -137,6 +132,33 @@ impl Parameters { true } + fn concat<'s, I>(iter: I) -> String + where + I: Iterator, + { + let mut into = String::new(); + Parameters::concat_into(iter, &mut into); + into + } + + fn concat_into<'s, I>(iter: I, into: &mut String) + where + I: Iterator, + { + let mut first = into.is_empty(); + for (k, v) in iter { + if !first { + into.push(LIST_SEPARATOR); + } + into.push_str(k); + if !v.is_empty() { + into.push(FIELD_SEPARATOR); + into.push_str(v); + } + first = false; + } + } + #[cfg(feature = "test")] pub fn rand(into: &mut String) { use rand::{ diff --git a/commons/zenoh-collections/src/properties.rs b/commons/zenoh-collections/src/properties.rs index 030eca7d53..026dd69f72 100644 --- a/commons/zenoh-collections/src/properties.rs +++ b/commons/zenoh-collections/src/properties.rs @@ -1,6 +1,3 @@ -use alloc::borrow::Cow; -use core::borrow::Borrow; - // // Copyright (c) 2022 ZettaScale Technology // @@ -14,8 +11,11 @@ use core::borrow::Borrow; // Contributors: // ZettaScale Zenoh Team, // -use crate::Parameters; -use std::{collections::HashMap, fmt}; +use crate::{Parameters, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR}; +use alloc::borrow::Cow; +use core::{borrow::Borrow, 
fmt}; +#[cfg(feature = "std")] +use std::collections::HashMap; /// A map of key/value (String,String) properties. /// It can be parsed from a String, using `;` or `` as separator between each properties @@ -25,10 +25,21 @@ use std::{collections::HashMap, fmt}; pub struct Properties<'s>(Cow<'s, str>); impl Properties<'_> { + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + pub fn as_str(&self) -> &str { &self.0 } + pub fn contains_key(&self, k: K) -> bool + where + K: Borrow, + { + self.get(k).is_some() + } + pub fn get(&self, k: K) -> Option<&str> where K: Borrow, @@ -43,7 +54,7 @@ impl Properties<'_> { Parameters::values(self.as_str(), k.borrow()) } - pub fn iter(&self) -> impl DoubleEndedIterator { + pub fn iter(&self) -> impl DoubleEndedIterator + Clone { Parameters::iter(self.as_str()) } @@ -67,11 +78,31 @@ impl Properties<'_> { self.0 = Cow::Owned(inner); removed } + + pub fn extend<'s, I, K, V>(&mut self, iter: I) + where + I: IntoIterator, + // I::Item: std::borrow::Borrow<(K, V)>, + K: AsRef + 's, + V: AsRef + 's, + { + self.0 = Cow::Owned(Parameters::extend( + Parameters::iter(self.as_str()), + iter.into_iter().map(|(k, v)| (k.as_ref(), v.as_ref())), + )); + } + + pub fn into_owned(self) -> Properties<'static> { + Properties(Cow::Owned(self.0.into_owned())) + } } impl<'s> From<&'s str> for Properties<'s> { - fn from(value: &'s str) -> Self { + fn from(mut value: &'s str) -> Self { if Parameters::is_sorted(Parameters::iter(value)) { + value = value.trim_end_matches(|c| { + c == LIST_SEPARATOR || c == FIELD_SEPARATOR || c == VALUE_SEPARATOR + }); Self(Cow::Borrowed(value)) } else { Self(Cow::Owned(Parameters::from_iter(Parameters::iter(value)))) @@ -80,8 +111,12 @@ impl<'s> From<&'s str> for Properties<'s> { } impl From for Properties<'_> { - fn from(value: String) -> Self { + fn from(mut value: String) -> Self { if Parameters::is_sorted(Parameters::iter(value.as_str())) { + let s = value.trim_end_matches(|c| { + c == LIST_SEPARATOR || c == FIELD_SEPARATOR || c == VALUE_SEPARATOR + }); + value.truncate(s.len()); Self(Cow::Owned(value)) } else { Self(Cow::Owned(Parameters::from_iter(Parameters::iter( @@ -123,6 +158,7 @@ where } } +#[cfg(feature = "std")] impl From> for Properties<'_> where K: AsRef, @@ -133,14 +169,29 @@ where } } -impl From> for HashMap { - fn from(props: Properties) -> Self { +#[cfg(feature = "std")] +impl<'s> From<&'s Properties<'s>> for HashMap<&'s str, &'s str> { + fn from(props: &'s Properties<'s>) -> Self { + HashMap::from_iter(Parameters::iter(props.as_str())) + } +} + +#[cfg(feature = "std")] +impl From<&Properties<'_>> for HashMap { + fn from(props: &Properties<'_>) -> Self { HashMap::from_iter( Parameters::iter(props.as_str()).map(|(k, v)| (k.to_string(), v.to_string())), ) } } +#[cfg(feature = "std")] +impl From> for HashMap { + fn from(props: Properties) -> Self { + HashMap::from(&props) + } +} + impl fmt::Display for Properties<'_> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.0) @@ -173,6 +224,11 @@ mod tests { Properties::from(&[("p1", "v1"), ("p2", "v2")][..]) ); + assert_eq!( + Properties::from("p1=v1;p2=v2;|="), + Properties::from(&[("p1", "v1"), ("p2", "v2")][..]) + ); + assert_eq!( Properties::from("p1=v1;p2;p3=v3"), Properties::from(&[("p1", "v1"), ("p2", ""), ("p3", "v3")][..]) diff --git a/commons/zenoh-protocol/src/core/endpoint.rs b/commons/zenoh-protocol/src/core/endpoint.rs index 898ee615e6..03678fb675 100644 --- a/commons/zenoh-protocol/src/core/endpoint.rs +++ 
b/commons/zenoh-protocol/src/core/endpoint.rs @@ -194,7 +194,7 @@ impl<'a> Metadata<'a> { self.as_str().is_empty() } - pub fn iter(&'a self) -> impl DoubleEndedIterator { + pub fn iter(&'a self) -> impl DoubleEndedIterator + Clone { Parameters::iter(self.0) } @@ -311,7 +311,7 @@ impl<'a> Config<'a> { self.as_str().is_empty() } - pub fn iter(&'a self) -> impl DoubleEndedIterator { + pub fn iter(&'a self) -> impl DoubleEndedIterator + Clone { Parameters::iter(self.0) } diff --git a/commons/zenoh-util/src/std_only/time_range.rs b/commons/zenoh-util/src/std_only/time_range.rs index 50e5542fcc..9cfaf32655 100644 --- a/commons/zenoh-util/src/std_only/time_range.rs +++ b/commons/zenoh-util/src/std_only/time_range.rs @@ -51,6 +51,7 @@ const W_TO_SECS: f64 = D_TO_SECS * 7.0; /// iteratively getting values for `[t0..t1[`, `[t1..t2[`, `[t2..t3[`... #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct TimeRange(pub TimeBound, pub TimeBound); + impl TimeRange { /// Resolves the offset bounds in the range using `now` as reference. pub fn resolve_at(self, now: SystemTime) -> TimeRange { @@ -81,6 +82,7 @@ impl TimeRange { } } } + impl TimeRange { /// Returns `true` if the provided `instant` belongs to `self`. pub fn contains(&self, instant: SystemTime) -> bool { @@ -96,17 +98,20 @@ impl TimeRange { } } } + impl From> for TimeRange { fn from(value: TimeRange) -> Self { TimeRange(value.0.into(), value.1.into()) } } + impl TryFrom> for TimeRange { type Error = (); fn try_from(value: TimeRange) -> Result { Ok(TimeRange(value.0.try_into()?, value.1.try_into()?)) } } + impl Display for TimeRange { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.0 { @@ -121,6 +126,7 @@ impl Display for TimeRange { } } } + impl Display for TimeRange { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.0 { @@ -195,6 +201,7 @@ pub enum TimeBound { Exclusive(T), Unbounded, } + impl From> for TimeBound { fn from(value: TimeBound) -> Self { match value { @@ -204,6 +211,7 @@ impl From> for TimeBound { } } } + impl TryFrom> for TimeBound { type Error = (); fn try_from(value: TimeBound) -> Result { @@ -214,6 +222,7 @@ impl TryFrom> for TimeBound { }) } } + impl TimeBound { /// Resolves `self` into a [`TimeBound`], using `now` as a reference for offset expressions. /// If `self` is time boundary that cannot be represented as `SystemTime` (which means it’s not inside @@ -238,11 +247,13 @@ pub enum TimeExpr { Fixed(SystemTime), Now { offset_secs: f64 }, } + impl From for TimeExpr { fn from(t: SystemTime) -> Self { Self::Fixed(t) } } + impl TryFrom for SystemTime { type Error = (); fn try_from(value: TimeExpr) -> Result { @@ -252,6 +263,7 @@ impl TryFrom for SystemTime { } } } + impl TimeExpr { /// Resolves `self` into a [`SystemTime`], using `now` as a reference for offset expressions. 
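
// A hedged sketch (not part of the patch) of how the `TimeRange`/`TimeExpr`
// machinery above is used by the `_time` selector parameter: parse an
// offset-based range, resolve it against a concrete instant, then test
// containment. The function name is illustrative only.
fn time_range_sketch() {
    use std::time::{Duration, SystemTime};
    use zenoh_util::time_range::TimeRange;

    let range: TimeRange = "[now(-2s)..now(2s)]".parse().unwrap();
    let now = SystemTime::now();
    let resolved = range.resolve_at(now); // pins both bounds to SystemTime
    assert!(resolved.contains(now));
    assert!(!resolved.contains(now + Duration::from_secs(10)));
}
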
/// @@ -295,6 +307,7 @@ impl TimeExpr { } } } + impl Display for TimeExpr { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 43c3f33776..fc74ca5421 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -407,12 +407,12 @@ async fn query(mut req: Request<(Arc, String)>) -> tide::Result Option { - let properties = selector.parameters_stringmap().unwrap(); // note: this is a hashmap + let properties = selector.parameters(); // note: this is a hashmap log::trace!("[ALIGN QUERYABLE] Properties are: {:?}", properties); - if properties.get(super::ERA).is_some() { + if properties.contains_key(super::ERA) { Some(AlignComponent::Era( EraType::from_str(properties.get(super::ERA).unwrap()).unwrap(), )) - } else if properties.get(super::INTERVALS).is_some() { + } else if properties.contains_key(super::INTERVALS) { let mut intervals = properties.get(super::INTERVALS).unwrap().to_string(); intervals.remove(0); intervals.pop(); @@ -206,7 +206,7 @@ impl AlignQueryable { .map(|x| x.parse::().unwrap()) .collect::>(), )) - } else if properties.get(super::SUBINTERVALS).is_some() { + } else if properties.contains_key(super::SUBINTERVALS) { let mut subintervals = properties.get(super::SUBINTERVALS).unwrap().to_string(); subintervals.remove(0); subintervals.pop(); @@ -216,7 +216,7 @@ impl AlignQueryable { .map(|x| x.parse::().unwrap()) .collect::>(), )) - } else if properties.get(super::CONTENTS).is_some() { + } else if properties.contains_key(super::CONTENTS) { let contents = serde_json::from_str(properties.get(super::CONTENTS).unwrap()).unwrap(); Some(AlignComponent::Contents(contents)) } else { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 93075170ac..b957655579 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -510,7 +510,7 @@ impl StorageService { return; } }; - match storage.get(stripped_key, q.parameters()).await { + match storage.get(stripped_key, q.parameters().as_str()).await { Ok(stored_data) => { for entry in stored_data { if let Err(e) = q @@ -542,7 +542,7 @@ impl StorageService { } }; let mut storage = self.storage.lock().await; - match storage.get(stripped_key, q.parameters()).await { + match storage.get(stripped_key, q.parameters().as_str()).await { Ok(stored_data) => { for entry in stored_data { if let Err(e) = q diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 62c38b16ee..2f066c63bc 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -421,11 +421,10 @@ impl Primitives for AdminSpace { }; let zid = self.zid; - let parameters = query.parameters.to_owned(); let query = Query { inner: Arc::new(QueryInner { key_expr: key_expr.clone(), - parameters, + parameters: query.parameters.into(), value: query .ext_body .map(|b| Value::from(b.payload).encoding(b.encoding)), @@ -530,8 +529,11 @@ fn router_data(context: &AdminContext, query: Query) { }); #[cfg(feature = "stats")] { - let stats = crate::prelude::Parameters::decode(&query.selector()) - .any(|(k, v)| k.as_ref() == "_stats" && v != "false"); + let stats = query + .selector() + .parameters() + .iter() + .any(|(k, v)| k == "_stats" && v != "false"); if stats { json.as_object_mut().unwrap().insert( "stats".to_string(), 
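
The hunks above replace the old string-map decoding (`parameters_stringmap()`, `Parameters::decode`) with map-style accessors available directly on a query's parameters. Below is a minimal sketch of a queryable handler written against that API; it is not part of the patch, and the `handle` function and the `mode` key are illustrative only.

    use zenoh::queryable::Query;

    fn handle(query: &Query) {
        let params = query.parameters();
        // Flag check, e.g. the `_stats` flag probed by the admin space above.
        let stats = params.iter().any(|(k, v)| k == "_stats" && v != "false");
        // Direct lookup of a single (illustrative) key.
        if let Some(mode) = params.get("mode") {
            println!("mode = {mode}, stats = {stats}");
        }
        // The raw string form remains available for backends that expect it.
        let _raw: &str = params.as_str();
    }
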
@@ -561,8 +563,11 @@ fn router_data(context: &AdminContext, query: Query) { #[cfg(feature = "stats")] { - let stats = crate::prelude::Parameters::decode(&query.selector()) - .any(|(k, v)| k.as_ref() == "_stats" && v != "false"); + let stats = query + .selector() + .parameters() + .iter() + .any(|(k, v)| k == "_stats" && v != "false"); if stats { json.as_object_mut().unwrap().insert( "stats".to_string(), diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index e2327c0dcc..9158425034 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -38,7 +38,7 @@ pub(crate) mod common { pub use crate::config::{self, Config, ValidatedMap}; pub use crate::handlers::IntoHandler; - pub use crate::selector::{Parameter, Parameters, Selector}; + pub use crate::selector::Selector; pub use crate::session::{Session, SessionDeclarations}; pub use crate::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 7b8da9f768..20c76fa15b 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -378,9 +378,10 @@ impl<'a, 'b, Handler> GetBuilder<'a, 'b, Handler> { #[zenoh_macros::unstable] pub fn accept_replies(self, accept: ReplyKeyExpr) -> Self { Self { - selector: self - .selector - .and_then(|s| s.accept_any_keyexpr(accept == ReplyKeyExpr::Any)), + selector: self.selector.map(|mut s| { + s.set_accept_any_keyexpr(accept == ReplyKeyExpr::Any); + s + }), ..self } } diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index a6d87df5a4..36a2c51ba0 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -20,6 +20,7 @@ use crate::net::primitives::Primitives; use crate::prelude::*; use crate::sample::builder::SampleBuilder; use crate::sample::QoSBuilder; +use crate::selector::Parameters; use crate::Id; use crate::SessionRef; use crate::Undeclarable; @@ -46,7 +47,7 @@ pub(crate) struct QueryInner { /// The key expression of this Query. pub(crate) key_expr: KeyExpr<'static>, /// This Query's selector parameters. - pub(crate) parameters: String, + pub(crate) parameters: Parameters<'static>, /// This Query's body. pub(crate) value: Option, @@ -80,7 +81,7 @@ impl Query { pub fn selector(&self) -> Selector<'_> { Selector { key_expr: self.inner.key_expr.clone(), - parameters: (&self.inner.parameters).into(), + parameters: self.inner.parameters.clone(), } } @@ -92,7 +93,7 @@ impl Query { /// This Query's selector parameters. #[inline(always)] - pub fn parameters(&self) -> &str { + pub fn parameters(&self) -> &Parameters { &self.inner.parameters } @@ -220,9 +221,7 @@ impl Query { }) } fn _accepts_any_replies(&self) -> ZResult { - self.parameters() - .get_bools([crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM]) - .map(|a| a[0]) + Ok(self.parameters().accept_any_keyexpr()?.unwrap_or(false)) } } diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index a9e8941b33..7d5326638e 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -14,19 +14,17 @@ //! 
[Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries -use zenoh_protocol::core::key_expr::{keyexpr, OwnedKeyExpr}; -use zenoh_result::ZResult; -pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; - use crate::{prelude::KeyExpr, queryable::Query}; - use std::{ - borrow::{Borrow, Cow}, collections::HashMap, convert::TryFrom, - hash::Hash, + ops::{Deref, DerefMut}, str::FromStr, }; +use zenoh_collections::Properties; +use zenoh_protocol::core::key_expr::{keyexpr, OwnedKeyExpr}; +use zenoh_result::ZResult; +use zenoh_util::time_range::TimeRange; /// A selector is the combination of a [Key Expression](crate::prelude::KeyExpr), which defines the /// set of keys that are relevant to an operation, and a set of parameters @@ -67,73 +65,42 @@ pub struct Selector<'a> { /// The part of this selector identifying which keys should be part of the selection. pub key_expr: KeyExpr<'a>, /// the part of this selector identifying which values should be part of the selection. - pub(crate) parameters: Cow<'a, str>, + pub(crate) parameters: Parameters<'a>, } pub const TIME_RANGE_KEY: &str = "_time"; impl<'a> Selector<'a> { - /// Gets the parameters as a raw string. - pub fn parameters(&self) -> &str { + /// Gets the parameters. + pub fn parameters(&self) -> &Parameters { &self.parameters } - /// Extracts the selector parameters into a hashmap, returning an error in case of duplicated parameter names. - pub fn parameters_map(&'a self) -> ZResult> - where - K: AsRef + std::hash::Hash + std::cmp::Eq, - ExtractedName<'a, Self>: Into, - ExtractedValue<'a, Self>: Into, - { - self.decode_into_map() - } - /// Extracts the selector parameters' name-value pairs into a hashmap, returning an error in case of duplicated parameters. - pub fn parameters_cowmap(&'a self) -> ZResult, Cow<'a, str>>> { - self.decode_into_map() - } - /// Extracts the selector parameters' name-value pairs into a hashmap, returning an error in case of duplicated parameters. - pub fn parameters_stringmap(&'a self) -> ZResult> { - self.decode_into_map() - } /// Gets a mutable reference to the parameters as a String. /// /// Note that calling this function may cause an allocation and copy if the selector's parameters wasn't /// already owned by `self`. `self` owns its parameters as soon as this function returns. - pub fn parameters_mut(&mut self) -> &mut String { - if let Cow::Borrowed(s) = self.parameters { - self.parameters = Cow::Owned(s.to_owned()) - } - if let Cow::Owned(s) = &mut self.parameters { - s - } else { - unsafe { std::hint::unreachable_unchecked() } // this is safe because we just replaced the borrowed variant - } - } - pub fn set_parameters(&mut self, selector: impl Into>) { - self.parameters = selector.into(); - } - pub fn borrowing_clone(&'a self) -> Self { - Selector { - key_expr: self.key_expr.clone(), - parameters: self.parameters.as_ref().into(), - } + pub fn parameters_mut(&mut self) -> &mut Parameters<'a> { + &mut self.parameters } + + /// Create an owned version of this selector with `'static` lifetime. pub fn into_owned(self) -> Selector<'static> { Selector { key_expr: self.key_expr.into_owned(), - parameters: self.parameters.into_owned().into(), + parameters: Parameters(self.parameters.0.into_owned()), } } - #[deprecated = "If you have ownership of this selector, prefer `Selector::into_owned`"] - pub fn to_owned(&self) -> Selector<'static> { - self.borrowing_clone().into_owned() - } - /// Returns this selectors components as a tuple. 
- pub fn split(self) -> (KeyExpr<'a>, Cow<'a, str>) { + pub fn split(self) -> (KeyExpr<'a>, Parameters<'a>) { (self.key_expr, self.parameters) } + /// Sets the time range targeted by the selector. + pub fn set_time_range>>(&mut self, time_range: T) { + self.parameters_mut().set_time_range(time_range); + } + /// Sets the `parameters` part of this `Selector`. #[inline(always)] pub fn with_parameters(mut self, parameters: &'a str) -> Self { @@ -141,300 +108,172 @@ impl<'a> Selector<'a> { self } - pub fn extend<'b, I, K, V>(&'b mut self, parameters: I) - where - I: IntoIterator, - I::Item: std::borrow::Borrow<(K, V)>, - K: AsRef + 'b, - V: AsRef + 'b, - { - let it = parameters.into_iter(); - let selector = self.parameters_mut(); - let mut encoder = form_urlencoded::Serializer::new(selector); - encoder.extend_pairs(it).finish(); - } - - /// Sets the time range targeted by the selector. - pub fn with_time_range(&mut self, time_range: TimeRange) { - self.remove_time_range(); - let selector = self.parameters_mut(); - if !selector.is_empty() { - selector.push('&') - } - use std::fmt::Write; - write!(selector, "{TIME_RANGE_KEY}={time_range}").unwrap(); // This unwrap is safe because `String: Write` should be infallibe. - } - - pub fn remove_time_range(&mut self) { - let selector = self.parameters_mut(); - - let mut splice_start = 0; - let mut splice_end = 0; - for argument in selector.split('&') { - if argument.starts_with(TIME_RANGE_KEY) - && matches!( - argument.as_bytes().get(TIME_RANGE_KEY.len()), - None | Some(b'=') - ) - { - splice_end = splice_start + argument.len(); - break; - } - splice_start += argument.len() + 1 - } - if splice_end > 0 { - selector.drain(splice_start..(splice_end + (splice_end != selector.len()) as usize)); - } + /// Extracts the standardized `_time` argument from the selector parameters. + /// + /// The default implementation still causes a complete pass through the selector parameters to ensure that there are no duplicates of the `_time` key. 
+ pub fn time_range(&self) -> ZResult> { + self.parameters().time_range() } #[cfg(any(feature = "unstable", test))] - pub(crate) fn parameter_index(&self, param_name: &str) -> ZResult> { - let starts_with_param = |s: &str| { - if let Some(rest) = s.strip_prefix(param_name) { - matches!(rest.as_bytes().first(), None | Some(b'=')) - } else { - false - } - }; - let mut acc = 0; - let mut res = None; - for chunk in self.parameters().split('&') { - if starts_with_param(chunk) { - if res.is_none() { - res = Some(acc) - } else { - bail!( - "parameter `{}` appeared multiple times in selector `{}`.", - param_name, - self - ) - } - } - acc += chunk.len() as u32 + 1; - } - Ok(res) + pub(crate) fn set_accept_any_keyexpr>>(&mut self, anyke: T) { + self.parameters_mut().set_accept_any_keyexpr(anyke); } + #[cfg(any(feature = "unstable", test))] - pub(crate) fn accept_any_keyexpr(self, any: bool) -> ZResult> { - use crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM; - let mut s = self.into_owned(); - let any_selparam = s.parameter_index(_REPLY_KEY_EXPR_ANY_SEL_PARAM)?; - match (any, any_selparam) { - (true, None) => { - let s = s.parameters_mut(); - if !s.is_empty() { - s.push('&') - } - s.push_str(_REPLY_KEY_EXPR_ANY_SEL_PARAM); - } - (false, Some(index)) => { - let s = dbg!(s.parameters_mut()); - let mut start = index as usize; - let pend = start + _REPLY_KEY_EXPR_ANY_SEL_PARAM.len(); - if dbg!(start) != 0 { - start -= 1 - } - match dbg!(&s[pend..]).find('&') { - Some(end) => std::mem::drop(s.drain(start..end + pend)), - None => s.truncate(start), - } - dbg!(s); - } - _ => {} - } - Ok(s) + pub(crate) fn accept_any_keyexpr(&self) -> ZResult> { + self.parameters().accept_any_keyexpr() } } -#[test] -fn selector_accessors() { - let time_range = "[now(-2s)..now(2s)]".parse().unwrap(); - for selector in [ - "hello/there?_timetrick", - "hello/there?_timetrick&_time", - "hello/there?_timetrick&_time&_filter", - "hello/there?_timetrick&_time=[..]", - "hello/there?_timetrick&_time=[..]&_filter", - ] { - let mut selector = Selector::try_from(selector).unwrap(); - selector.with_time_range(time_range); - assert_eq!(selector.time_range().unwrap().unwrap(), time_range); - assert!(dbg!(selector.parameters()).contains("_time=[now(-2s)..now(2s)]")); - let map_selector = selector.parameters_cowmap().unwrap(); - assert_eq!( - selector.time_range().unwrap(), - map_selector.time_range().unwrap() - ); - let without_any = selector.to_string(); - let with_any = selector.to_string() + "&" + crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM; - selector = selector.accept_any_keyexpr(false).unwrap(); - assert_eq!(selector.to_string(), without_any); - selector = selector.accept_any_keyexpr(true).unwrap(); - assert_eq!(selector.to_string(), with_any); - selector = selector.accept_any_keyexpr(true).unwrap(); - assert_eq!(selector.to_string(), with_any); - selector = selector.accept_any_keyexpr(false).unwrap(); - assert_eq!(selector.to_string(), without_any); - selector = selector.accept_any_keyexpr(true).unwrap(); - assert_eq!(selector.to_string(), with_any); - selector.parameters_mut().push_str("&other"); - assert_eq!(selector.to_string(), with_any + "&other"); - selector = selector.accept_any_keyexpr(false).unwrap(); - assert_eq!(selector.to_string(), without_any + "&other"); +/// A wrapper type to help decode zenoh selector parameters. +/// +/// Most methods will return an Error if duplicates of a same parameter are found, to avoid HTTP Parameter Pollution like vulnerabilities. 
+#[repr(transparent)] +#[derive(Clone, PartialEq, Eq)] +pub struct Parameters<'a>(Properties<'a>); + +impl<'a> Deref for Parameters<'a> { + type Target = Properties<'a>; + + fn deref(&self) -> &Self::Target { + &self.0 } } -pub trait Parameter: Sized { - type Name: AsRef + Sized; - type Value: AsRef + Sized; - fn name(&self) -> &Self::Name; - fn value(&self) -> &Self::Value; - fn split(self) -> (Self::Name, Self::Value); - fn extract_name(self) -> Self::Name { - self.split().0 - } - fn extract_value(self) -> Self::Value { - self.split().1 + +impl<'a> DerefMut for Parameters<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 } } -impl + Sized, V: AsRef + Sized> Parameter for (N, V) { - type Name = N; - type Value = V; - fn name(&self) -> &N { - &self.0 - } - fn value(&self) -> &V { - &self.1 + +impl std::fmt::Display for Parameters<'_> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) } - fn split(self) -> (Self::Name, Self::Value) { - self +} + +impl std::fmt::Debug for Parameters<'_> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self) } - fn extract_name(self) -> Self::Name { - self.0 +} + +impl<'a, T> From for Parameters<'a> +where + T: Into>, +{ + fn from(value: T) -> Self { + Parameters(value.into()) } - fn extract_value(self) -> Self::Value { - self.1 +} + +impl<'s> From<&'s Parameters<'s>> for HashMap<&'s str, &'s str> { + fn from(props: &'s Parameters<'s>) -> Self { + HashMap::from(&props.0) } } -#[allow(type_alias_bounds)] -type ExtractedName<'a, VS: Parameters<'a>> = <::Item as Parameter>::Name; -#[allow(type_alias_bounds)] -type ExtractedValue<'a, VS: Parameters<'a>> = <::Item as Parameter>::Value; -/// A trait to help decode zenoh selector parameters. -/// -/// Most methods will return an Error if duplicates of a same parameter are found, to avoid HTTP Parameter Pollution like vulnerabilities. -pub trait Parameters<'a> { - type Decoder: Iterator + 'a; - /// Returns this selector's parameters as an iterator. - fn decode(&'a self) -> Self::Decoder - where - ::Item: Parameter; - - /// Extracts all parameters into a HashMap, returning an error if duplicate parameters arrise. - fn decode_into_map(&'a self) -> ZResult> - where - ::Item: Parameter, - N: AsRef + std::hash::Hash + std::cmp::Eq, - ExtractedName<'a, Self>: Into, - ExtractedValue<'a, Self>: Into, - { - let mut result: HashMap = HashMap::new(); - for (name, value) in self.decode().map(Parameter::split) { - match result.entry(name.into()) { - std::collections::hash_map::Entry::Occupied(e) => { - bail!("Duplicated parameter `{}` detected", e.key().as_ref()) - } - std::collections::hash_map::Entry::Vacant(e) => { - e.insert(value.into()); - } - } - } - Ok(result) +impl From<&Parameters<'_>> for HashMap { + fn from(props: &Parameters) -> Self { + HashMap::from(&props.0) } +} - /// Extracts the requested parameters from the selector parameters. - /// - /// The default implementation is done in a single pass through the selector parameters, returning an error if any of the requested parameters are present more than once. 
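
The `Parameters` wrapper introduced above, with its `Deref` to `Properties` and the `HashMap` conversions, replaces the decoder trait whose removal continues below. The following is a rough sketch of the resulting call sites, mirroring the `selector_accessors` test further down; it is not part of the patch, the `_filter` key is illustrative, and the `zenoh::selector` path is assumed from this tree's module layout.

    use std::collections::HashMap;
    use zenoh::selector::{Selector, TIME_RANGE_KEY};

    fn inspect(selector: &mut Selector<'_>) {
        // Map-like reads through the `Properties` deref.
        let filtered = selector.parameters().contains_key("_filter");
        // Parameters can be edited in place; `_time` is decoded on demand.
        selector
            .parameters_mut()
            .insert(TIME_RANGE_KEY, "[now(-2s)..now(2s)]");
        let time_range = selector.time_range().unwrap(); // Option<TimeRange>
        // Owned map view when one is needed.
        let map: HashMap<String, String> = HashMap::from(selector.parameters());
        println!("filtered: {filtered}, time range: {time_range:?}, params: {map:?}");
    }
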
- fn get_parameters( - &'a self, - names: [&str; N], - ) -> ZResult<[Option>; N]> - where - ::Item: Parameter, - { - let mut result = unsafe { - let mut result: std::mem::MaybeUninit<[Option>; N]> = - std::mem::MaybeUninit::uninit(); - for slot in result.assume_init_mut() { - std::ptr::write(slot, None); - } - result.assume_init() - }; - for pair in self.decode() { - if let Some(index) = names.iter().position(|k| *k == pair.name().as_ref()) { - let slot = &mut result[index]; - if slot.is_some() { - bail!("Duplicated parameter `{}` detected.", names[index]) - } - *slot = Some(pair.extract_value()) - } - } - Ok(result) +impl From> for HashMap { + fn from(props: Parameters) -> Self { + HashMap::from(props.0) } +} - /// Extracts the requested arguments from the selector parameters as booleans, following the Zenoh convention that if a parameter name is present and has a value different from "false", its value is truthy. - /// - /// The default implementation is done in a single pass through the selector parameters, returning an error if some of the requested parameters are present more than once. - fn get_bools(&'a self, names: [&str; N]) -> ZResult<[bool; N]> - where - ::Item: Parameter, - { - Ok(self.get_parameters(names)?.map(|v| match v { - None => false, - Some(s) => s.as_ref() != "false", - })) +impl Parameters<'_> { + /// Sets the time range targeted by the selector. + pub fn set_time_range>>(&mut self, time_range: T) { + let mut time_range: Option = time_range.into(); + match time_range.take() { + Some(tr) => self.0.insert(TIME_RANGE_KEY, format!("{}", tr)), + None => self.0.remove(TIME_RANGE_KEY), + }; } /// Extracts the standardized `_time` argument from the selector parameters. /// /// The default implementation still causes a complete pass through the selector parameters to ensure that there are no duplicates of the `_time` key. 
- fn time_range(&'a self) -> ZResult> - where - ::Item: Parameter, - { - Ok(match &self.get_parameters([TIME_RANGE_KEY])?[0] { - Some(s) => Some(s.as_ref().parse()?), - None => None, - }) + fn time_range(&self) -> ZResult> { + match self.0.get(TIME_RANGE_KEY) { + Some(tr) => Ok(Some(tr.parse()?)), + None => Ok(None), + } } -} -impl<'a> Parameters<'a> for Selector<'a> { - type Decoder = >::Decoder; - fn decode(&'a self) -> Self::Decoder { - self.parameters().decode() + + #[cfg(any(feature = "unstable", test))] + pub(crate) fn set_accept_any_keyexpr>>(&mut self, anyke: T) { + use crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM as ANYKE; + + let mut anyke: Option = anyke.into(); + match anyke.take() { + Some(ak) => { + if ak { + self.0.insert(ANYKE, "") + } else { + self.0.insert(ANYKE, "false") + } + } + None => self.0.remove(ANYKE), + }; } -} -impl<'a> Parameters<'a> for str { - type Decoder = form_urlencoded::Parse<'a>; - fn decode(&'a self) -> Self::Decoder { - form_urlencoded::parse(self.as_bytes()) + + #[cfg(any(feature = "unstable", test))] + pub(crate) fn accept_any_keyexpr(&self) -> ZResult> { + use crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM as ANYKE; + + match self.0.get(ANYKE) { + Some(ak) => Ok(Some(ak.parse()?)), + None => Ok(None), + } } } +#[test] +fn selector_accessors() { + let time_range = "[now(-2s)..now(2s)]".parse().unwrap(); + for selector in [ + "hello/there?_timetrick", + "hello/there?_timetrick;_time", + "hello/there?_timetrick;_time;_filter", + "hello/there?_timetrick;_time=[..]", + "hello/there?_timetrick;_time=[..];_filter", + ] { + let mut selector = Selector::try_from(selector).unwrap(); + println!("Parameters start: {}", selector.parameters()); + for i in selector.parameters().iter() { + println!("\t{:?}", i); + } + + assert_eq!(selector.parameters().get("_timetrick").unwrap(), ""); + + selector.set_time_range(time_range); + assert_eq!(selector.time_range().unwrap().unwrap(), time_range); + assert!(selector.parameters().contains_key(TIME_RANGE_KEY)); -impl<'a, K: Borrow + Hash + Eq + 'a, V: Borrow + 'a> Parameters<'a> for HashMap { - type Decoder = std::collections::hash_map::Iter<'a, K, V>; - fn decode(&'a self) -> Self::Decoder { - self.iter() - } - fn get_parameters( - &'a self, - names: [&str; N], - ) -> ZResult<[Option>; N]> - where - ::Item: Parameter, - { - // `Ok(names.map(|key| self.get(key)))` would be very slightly faster, but doesn't compile for some reason :( - Ok(names.map(|key| self.get_key_value(key).map(|kv| kv.extract_value()))) + let hm: HashMap = HashMap::from(selector.parameters()); + assert!(hm.contains_key(TIME_RANGE_KEY)); + + let hm: HashMap<&str, &str> = HashMap::from(selector.parameters()); + assert!(hm.contains_key(TIME_RANGE_KEY)); + + selector.parameters_mut().insert("_filter", ""); + assert_eq!(selector.parameters().get("_filter").unwrap(), ""); + + selector.set_accept_any_keyexpr(true); + + println!("Parameters end: {}", selector.parameters()); + for i in selector.parameters().iter() { + println!("\t{:?}", i); + } + assert_eq!( + &format!("{}", selector), + "hello/there?_anyke;_filter;_time=[now(-2s)..now(2s)];_timetrick" + ); } } @@ -448,7 +287,7 @@ impl std::fmt::Display for Selector<'_> { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "{}", self.key_expr)?; if !self.parameters.is_empty() { - write!(f, "?{}", self.parameters)?; + write!(f, "?{}", self.parameters.as_str())?; } Ok(()) } @@ -504,7 +343,7 @@ impl<'a> From<&'a Query> for Selector<'a> { fn from(q: &'a Query) -> Self { Selector { key_expr: 
q.inner.key_expr.clone(), - parameters: (&q.inner.parameters).into(), + parameters: q.inner.parameters.clone(), } } } diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index ca5d44c3a6..beaecba314 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -24,8 +24,8 @@ use crate::net::primitives::Primitives; use crate::net::routing::dispatcher::face::Face; use crate::net::runtime::Runtime; use crate::payload::Payload; +use crate::prelude::KeyExpr; use crate::prelude::Locality; -use crate::prelude::{KeyExpr, Parameters}; use crate::publication::*; use crate::query::*; use crate::queryable::*; @@ -1621,7 +1621,7 @@ impl Session { let mut state = zwrite!(self.state); let consolidation = match consolidation.mode { ConsolidationMode::Auto => { - if selector.decode().any(|(k, _)| k.as_ref() == TIME_RANGE_KEY) { + if selector.parameters().contains_key(TIME_RANGE_KEY) { ConsolidationMode::None } else { ConsolidationMode::Latest @@ -1728,7 +1728,7 @@ impl Session { self.handle_query( true, &wexpr, - selector.parameters(), + selector.parameters().as_str(), qid, target, consolidation, @@ -1797,13 +1797,11 @@ impl Session { } }; - let parameters = parameters.to_owned(); - let zid = self.runtime.zid(); let query_inner = Arc::new(QueryInner { key_expr, - parameters, + parameters: parameters.to_owned().into(), value: body.map(|b| Value { payload: b.payload.into(), encoding: b.encoding.into(), @@ -2189,13 +2187,8 @@ impl Primitives for Session { }; match state.queries.get_mut(&msg.rid) { Some(query) => { - if !matches!( - query - .selector - .parameters() - .get_bools([crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM]), - Ok([true]) - ) && !query.selector.key_expr.intersects(&key_expr) + if !matches!(query.selector.accept_any_keyexpr(), Ok(Some(true))) + && !query.selector.key_expr.intersects(&key_expr) { log::warn!( "Received Reply for `{}` from `{:?}, which didn't match query `{}`: dropping Reply.", diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 267bb5c284..77850b7c7c 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -148,7 +148,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re .declare_queryable(key_expr) .callback(move |query| { c_msgs.fetch_add(1, Ordering::Relaxed); - match query.parameters() { + match query.parameters().as_str() { "ok_put" => { tokio::task::block_in_place(|| { tokio::runtime::Handle::current().block_on(async { From f181b435bd3a7d333d996177e0d1b0bcf1991545 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 12 Apr 2024 22:13:46 +0200 Subject: [PATCH 208/357] Fix import --- commons/zenoh-collections/src/parameters.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/commons/zenoh-collections/src/parameters.rs b/commons/zenoh-collections/src/parameters.rs index 536f2beb2a..0d6e051397 100644 --- a/commons/zenoh-collections/src/parameters.rs +++ b/commons/zenoh-collections/src/parameters.rs @@ -15,6 +15,8 @@ pub const LIST_SEPARATOR: char = ';'; pub const FIELD_SEPARATOR: char = '='; pub const VALUE_SEPARATOR: char = '|'; +use alloc::{string::String, vec::Vec}; + fn split_once(s: &str, c: char) -> (&str, &str) { match s.find(c) { Some(index) => { From e7fbbf3956e97b1d18df8584e52c5ec166e2f12c Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 12 Apr 2024 22:31:36 +0200 Subject: [PATCH 209/357] Payload Cow<'static, _> deserializer --- zenoh/src/payload.rs | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs 
index 11a6f0c360..35d9ea6e58 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -543,6 +543,15 @@ impl<'a> Deserialize<'a, Cow<'a, [u8]>> for ZSerde { } } +impl From for Cow<'static, [u8]> { + fn from(v: Payload) -> Self { + match v.0.contiguous() { + Cow::Borrowed(s) => Cow::Owned(s.to_vec()), + Cow::Owned(s) => Cow::Owned(s), + } + } +} + impl<'a> From<&'a Payload> for Cow<'a, [u8]> { fn from(value: &'a Payload) -> Self { ZSerde.deserialize(value).unwrap_infallible() @@ -650,7 +659,15 @@ impl<'a> Deserialize<'a, Cow<'a, str>> for ZSerde { type Error = Utf8Error; fn deserialize(self, v: &'a Payload) -> Result, Self::Error> { - let v: Cow<[u8]> = Self.deserialize(v).unwrap_infallible(); + Cow::try_from(v) + } +} + +impl TryFrom for Cow<'static, str> { + type Error = Utf8Error; + + fn try_from(v: Payload) -> Result { + let v: Cow<'static, [u8]> = Cow::from(v); let _ = core::str::from_utf8(v.as_ref())?; // SAFETY: &str is &[u8] with the guarantee that every char is UTF-8 // As implemented internally https://doc.rust-lang.org/std/str/fn.from_utf8_unchecked.html. @@ -661,8 +678,12 @@ impl<'a> Deserialize<'a, Cow<'a, str>> for ZSerde { impl<'a> TryFrom<&'a Payload> for Cow<'a, str> { type Error = Utf8Error; - fn try_from(value: &'a Payload) -> Result { - ZSerde.deserialize(value) + fn try_from(v: &'a Payload) -> Result { + let v: Cow<'a, [u8]> = Cow::from(v); + let _ = core::str::from_utf8(v.as_ref())?; + // SAFETY: &str is &[u8] with the guarantee that every char is UTF-8 + // As implemented internally https://doc.rust-lang.org/std/str/fn.from_utf8_unchecked.html. + Ok(unsafe { core::mem::transmute(v) }) } } From 27b7e06a04927cecc49e345c2b5089ec3bb40c63 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 12 Apr 2024 22:49:22 +0200 Subject: [PATCH 210/357] Impl Payload Serialize/Deserialize for Properties --- commons/zenoh-collections/src/properties.rs | 9 +++ zenoh/src/payload.rs | 63 +++++++++++++++++++++ 2 files changed, 72 insertions(+) diff --git a/commons/zenoh-collections/src/properties.rs b/commons/zenoh-collections/src/properties.rs index 026dd69f72..6b6f1de908 100644 --- a/commons/zenoh-collections/src/properties.rs +++ b/commons/zenoh-collections/src/properties.rs @@ -126,6 +126,15 @@ impl From for Properties<'_> { } } +impl<'s> From> for Properties<'s> { + fn from(value: Cow<'s, str>) -> Self { + match value { + Cow::Borrowed(s) => Properties::from(s), + Cow::Owned(s) => Properties::from(s), + } + } +} + impl<'s, K, V> FromIterator<(&'s K, &'s V)> for Properties<'_> where K: AsRef + 's, diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index 35d9ea6e58..3f9fed1e90 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -28,6 +28,7 @@ use zenoh_buffers::{ ZBufReader, ZSlice, }; use zenoh_codec::{RCodec, WCodec, Zenoh080}; +use zenoh_collections::Properties; use zenoh_result::{ZError, ZResult}; #[cfg(feature = "shared-memory")] use zenoh_shm::SharedMemoryBuf; @@ -842,6 +843,63 @@ impl TryFrom<&Payload> for bool { } // - Zenoh advanced types encoders/decoders +// Properties +impl Serialize> for ZSerde { + type Output = Payload; + + fn serialize(self, t: Properties<'_>) -> Self::Output { + Self.serialize(t.as_str()) + } +} + +impl From> for Payload { + fn from(t: Properties<'_>) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&Properties<'_>> for ZSerde { + type Output = Payload; + + fn serialize(self, t: &Properties<'_>) -> Self::Output { + Self.serialize(t.as_str()) + } +} + +impl<'s> From<&'s Properties<'s>> for Payload { + fn from(t: &'s 
Properties<'s>) -> Self { + ZSerde.serialize(t) + } +} + +impl<'s> Deserialize<'s, Properties<'s>> for ZSerde { + type Error = ZDeserializeError; + + fn deserialize(self, v: &'s Payload) -> Result, Self::Error> { + let s = v + .deserialize::>() + .map_err(|_| ZDeserializeError)?; + Ok(Properties::from(s)) + } +} + +impl TryFrom for Properties<'static> { + type Error = ZDeserializeError; + + fn try_from(v: Payload) -> Result { + let s = v.deserialize::>().map_err(|_| ZDeserializeError)?; + Ok(Properties::from(s.into_owned())) + } +} + +impl<'s> TryFrom<&'s Payload> for Properties<'s> { + type Error = ZDeserializeError; + + fn try_from(value: &'s Payload) -> Result { + ZSerde.deserialize(value) + } +} + // JSON impl Serialize for ZSerde { type Output = Result; @@ -1313,6 +1371,7 @@ mod tests { use rand::Rng; use std::borrow::Cow; use zenoh_buffers::{ZBuf, ZSlice}; + use zenoh_collections::Properties; const NUM: usize = 1_000; @@ -1405,6 +1464,10 @@ mod tests { serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 0])); serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 64])); + // Properties + serialize_deserialize!(Properties, Properties::from("")); + serialize_deserialize!(Properties, Properties::from("a=1;b=2;c3")); + // Tuple serialize_deserialize!((usize, usize), (0, 1)); serialize_deserialize!((usize, String), (0, String::from("a"))); From 0f4477df69349c8c994bdf2425d8a12787d47a3b Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Sat, 13 Apr 2024 01:17:08 +0200 Subject: [PATCH 211/357] Fix paramters concat bug --- commons/zenoh-collections/src/parameters.rs | 30 ++++++++++++--------- commons/zenoh-collections/src/properties.rs | 1 - 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/commons/zenoh-collections/src/parameters.rs b/commons/zenoh-collections/src/parameters.rs index 0d6e051397..9bb6eb4148 100644 --- a/commons/zenoh-collections/src/parameters.rs +++ b/commons/zenoh-collections/src/parameters.rs @@ -32,13 +32,8 @@ pub struct Parameters; impl Parameters { pub fn iter(s: &str) -> impl DoubleEndedIterator + Clone { - s.split(LIST_SEPARATOR).filter_map(|prop| { - if prop.is_empty() { - None - } else { - Some(split_once(prop, FIELD_SEPARATOR)) - } - }) + s.split(LIST_SEPARATOR) + .filter_map(|prop| (!prop.is_empty()).then(|| split_once(prop, FIELD_SEPARATOR))) } #[allow(clippy::should_implement_trait)] @@ -55,13 +50,22 @@ impl Parameters { where I: Iterator, { - let mut from = iter - .filter(|(k, _)| !k.is_empty()) - .collect::>(); - from.sort_by(|(k1, _), (k2, _)| k1.cmp(k2)); + let mut from = iter.collect::>(); + from.sort_unstable_by(|(k1, _), (k2, _)| k1.cmp(k2)); Self::concat_into(from.iter().copied(), into); } + pub fn from_slice_mut(slice: &mut [(&str, &str)]) -> String { + let mut into = String::new(); + Self::from_slice_mut_into(slice, &mut into); + into + } + + pub fn from_slice_mut_into(slice: &mut [(&str, &str)], into: &mut String) { + slice.sort_unstable_by(|(k1, _), (k2, _)| k1.cmp(k2)); + Self::concat_into(slice.iter().copied(), into); + } + pub fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> { Self::iter(s) .find(|(key, _)| *key == k) @@ -147,8 +151,8 @@ impl Parameters { where I: Iterator, { - let mut first = into.is_empty(); - for (k, v) in iter { + let mut first = true; + for (k, v) in iter.filter(|(k, _)| !k.is_empty()) { if !first { into.push(LIST_SEPARATOR); } diff --git a/commons/zenoh-collections/src/properties.rs b/commons/zenoh-collections/src/properties.rs index 6b6f1de908..af881a94a6 100644 --- a/commons/zenoh-collections/src/properties.rs 
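
With the `Serialize`/`Deserialize` impls above, a `Properties` map can travel directly as a `Payload`, which is exactly what the new `serialize_deserialize!` cases exercise. A minimal round-trip sketch follows; it is not part of the patch and the `zenoh::payload` path is an assumption based on this tree's module layout.

    use zenoh::payload::Payload;
    use zenoh_collections::Properties;

    fn roundtrip() {
        let props = Properties::from("a=1;b=2;c=3");
        // Properties -> Payload through the new `From<&Properties>` impl.
        let payload = Payload::from(&props);
        // Payload -> Properties through the matching `TryFrom<&Payload>` impl.
        let back = Properties::try_from(&payload).unwrap();
        assert_eq!(back.as_str(), props.as_str());
    }
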
+++ b/commons/zenoh-collections/src/properties.rs @@ -82,7 +82,6 @@ impl Properties<'_> { pub fn extend<'s, I, K, V>(&mut self, iter: I) where I: IntoIterator, - // I::Item: std::borrow::Borrow<(K, V)>, K: AsRef + 's, V: AsRef + 's, { From e91c9978bdd87f2db3e6d4520d3c3a28774d2d07 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Sat, 13 Apr 2024 01:49:22 +0200 Subject: [PATCH 212/357] Properties use borrow trait --- commons/zenoh-collections/src/properties.rs | 47 +++++++++++++++------ 1 file changed, 34 insertions(+), 13 deletions(-) diff --git a/commons/zenoh-collections/src/properties.rs b/commons/zenoh-collections/src/properties.rs index af881a94a6..9b0619223c 100644 --- a/commons/zenoh-collections/src/properties.rs +++ b/commons/zenoh-collections/src/properties.rs @@ -82,12 +82,12 @@ impl Properties<'_> { pub fn extend<'s, I, K, V>(&mut self, iter: I) where I: IntoIterator, - K: AsRef + 's, - V: AsRef + 's, + K: Borrow + 's, + V: Borrow + 's, { self.0 = Cow::Owned(Parameters::extend( Parameters::iter(self.as_str()), - iter.into_iter().map(|(k, v)| (k.as_ref(), v.as_ref())), + iter.into_iter().map(|(k, v)| (k.borrow(), v.borrow())), )); } @@ -136,30 +136,30 @@ impl<'s> From> for Properties<'s> { impl<'s, K, V> FromIterator<(&'s K, &'s V)> for Properties<'_> where - K: AsRef + 's, - V: AsRef + 's, + K: Borrow + 's, + V: Borrow + 's, { fn from_iter>(iter: T) -> Self { - let inner = Parameters::from_iter(iter.into_iter().map(|(k, v)| (k.as_ref(), v.as_ref()))); + let inner = Parameters::from_iter(iter.into_iter().map(|(k, v)| (k.borrow(), v.borrow()))); Self(Cow::Owned(inner)) } } impl<'s, K, V> FromIterator<&'s (K, V)> for Properties<'_> where - K: AsRef + 's, - V: AsRef + 's, + K: Borrow + 's, + V: Borrow + 's, { fn from_iter>(iter: T) -> Self { - let inner = Parameters::from_iter(iter.into_iter().map(|(k, v)| (k.as_ref(), v.as_ref()))); + let inner = Parameters::from_iter(iter.into_iter().map(|(k, v)| (k.borrow(), v.borrow()))); Self(Cow::Owned(inner)) } } impl<'s, K, V> From<&'s [(K, V)]> for Properties<'_> where - K: AsRef + 's, - V: AsRef + 's, + K: Borrow + 's, + V: Borrow + 's, { fn from(value: &'s [(K, V)]) -> Self { Self::from_iter(value.iter()) @@ -169,8 +169,8 @@ where #[cfg(feature = "std")] impl From> for Properties<'_> where - K: AsRef, - V: AsRef, + K: Borrow, + V: Borrow, { fn from(map: HashMap) -> Self { Self::from_iter(map.iter()) @@ -193,6 +193,15 @@ impl From<&Properties<'_>> for HashMap { } } +#[cfg(feature = "std")] +impl<'s> From<&'s Properties<'s>> for HashMap, Cow<'s, str>> { + fn from(props: &'s Properties<'s>) -> Self { + HashMap::from_iter( + Parameters::iter(props.as_str()).map(|(k, v)| (Cow::from(k), Cow::from(v))), + ) + } +} + #[cfg(feature = "std")] impl From> for HashMap { fn from(props: Properties) -> Self { @@ -251,5 +260,17 @@ mod tests { Properties::from("p1=x=y;p2=a==b"), Properties::from(&[("p1", "x=y"), ("p2", "a==b")][..]) ); + + let mut hm: HashMap = HashMap::new(); + hm.insert("p1".to_string(), "v1".to_string()); + assert_eq!(Properties::from(hm), Properties::from("p1=v1")); + + let mut hm: HashMap<&str, &str> = HashMap::new(); + hm.insert("p1", "v1"); + assert_eq!(Properties::from(hm), Properties::from("p1=v1")); + + let mut hm: HashMap, Cow> = HashMap::new(); + hm.insert(Cow::from("p1"), Cow::from("v1")); + assert_eq!(Properties::from(hm), Properties::from("p1=v1")); } } From 149ab064bfc1a6446d4ddccb03c137d6b390442f Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Sat, 13 Apr 2024 01:51:43 +0200 Subject: [PATCH 213/357] Fix clippy --- 
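
Relaxing the bounds from `AsRef<str>` to `Borrow<str>` in the commit above is what lets the same `From`/`FromIterator` impls accept `&str`, `String` and `Cow<str>` keys alike, as the new test cases show. A small illustrative sketch, not part of the patch:

    use std::borrow::Cow;
    use std::collections::HashMap;
    use zenoh_collections::Properties;

    fn build() {
        let expected = Properties::from("p1=v1");

        // Borrowed keys and values.
        let mut borrowed: HashMap<&str, &str> = HashMap::new();
        borrowed.insert("p1", "v1");
        assert_eq!(Properties::from(borrowed), expected);

        // Owned Strings.
        let mut owned: HashMap<String, String> = HashMap::new();
        owned.insert("p1".to_string(), "v1".to_string());
        assert_eq!(Properties::from(owned), expected);

        // Cow mixes of the two.
        let mut cows: HashMap<Cow<str>, Cow<str>> = HashMap::new();
        cows.insert(Cow::from("p1"), Cow::from("v1"));
        assert_eq!(Properties::from(cows), expected);
    }
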
commons/zenoh-collections/src/parameters.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/commons/zenoh-collections/src/parameters.rs b/commons/zenoh-collections/src/parameters.rs index 9bb6eb4148..e86dfa2623 100644 --- a/commons/zenoh-collections/src/parameters.rs +++ b/commons/zenoh-collections/src/parameters.rs @@ -33,7 +33,8 @@ pub struct Parameters; impl Parameters { pub fn iter(s: &str) -> impl DoubleEndedIterator + Clone { s.split(LIST_SEPARATOR) - .filter_map(|prop| (!prop.is_empty()).then(|| split_once(prop, FIELD_SEPARATOR))) + .filter(|p| !p.is_empty()) + .map(|p| split_once(p, FIELD_SEPARATOR)) } #[allow(clippy::should_implement_trait)] From 14a203676492da6f8d9910337d9d4f39cf1ba8b5 Mon Sep 17 00:00:00 2001 From: DenisBiryukov91 <155981813+DenisBiryukov91@users.noreply.github.com> Date: Sat, 13 Apr 2024 02:00:45 +0200 Subject: [PATCH 214/357] make Selector::key_expr pub(crate) and add corresponding accessor (#928) --- .../src/queryable_get/bin/z_queryable_get.rs | 2 +- examples/examples/z_storage.rs | 2 +- plugins/zenoh-plugin-example/src/lib.rs | 2 +- plugins/zenoh-plugin-rest/src/lib.rs | 4 ++-- plugins/zenoh-plugin-storage-manager/src/lib.rs | 8 ++++---- zenoh-ext/src/publication_cache.rs | 6 +++--- zenoh/src/selector.rs | 8 ++++++-- 7 files changed, 18 insertions(+), 14 deletions(-) diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs index 84c3a82f88..a5111c11e3 100644 --- a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -33,7 +33,7 @@ async fn main() { zenoh_runtime::ZRuntime::Application.block_in_place(async move { query .reply( - query.selector().key_expr, + query.selector().key_expr(), query.value().unwrap().payload().clone(), ) .res() diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index cb2f40c125..8ae8c4c678 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -60,7 +60,7 @@ async fn main() { let query = query.unwrap(); println!(">> [Queryable ] Received Query '{}'", query.selector()); for (stored_name, sample) in stored.iter() { - if query.selector().key_expr.intersects(unsafe {keyexpr::from_str_unchecked(stored_name)}) { + if query.selector().key_expr().intersects(unsafe {keyexpr::from_str_unchecked(stored_name)}) { query.reply(sample.key_expr().clone(), sample.payload().clone()).res().await.unwrap(); } } diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index ad254278e3..90b88e8095 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -174,7 +174,7 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { let query = query.unwrap(); info!("Handling query '{}'", query.selector()); for (key_expr, sample) in stored.iter() { - if query.selector().key_expr.intersects(unsafe{keyexpr::from_str_unchecked(key_expr)}) { + if query.selector().key_expr().intersects(unsafe{keyexpr::from_str_unchecked(key_expr)}) { query.reply_sample(sample.clone()).res().await.unwrap(); } } diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 6edcfdb945..1068d07163 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -274,7 +274,7 @@ impl RunningPluginTrait for RunningPlugin { with_extended_string(&mut key, &["/version"], |key| { if keyexpr::new(key.as_str()) .unwrap() 
- .intersects(&selector.key_expr) + .intersects(selector.key_expr()) { responses.push(zenoh::plugins::Response::new( key.clone(), @@ -285,7 +285,7 @@ impl RunningPluginTrait for RunningPlugin { with_extended_string(&mut key, &["/port"], |port_key| { if keyexpr::new(port_key.as_str()) .unwrap() - .intersects(&selector.key_expr) + .intersects(selector.key_expr()) { responses.push(zenoh::plugins::Response::new( port_key.clone(), diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 91df2f108d..6aa0a09f9a 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -304,7 +304,7 @@ impl RunningPluginTrait for StorageRuntime { with_extended_string(&mut key, &["/version"], |key| { if keyexpr::new(key.as_str()) .unwrap() - .intersects(&selector.key_expr) + .intersects(selector.key_expr()) { responses.push(zenoh::plugins::Response::new( key.clone(), @@ -319,7 +319,7 @@ impl RunningPluginTrait for StorageRuntime { with_extended_string(key, &["/__path__"], |key| { if keyexpr::new(key.as_str()) .unwrap() - .intersects(&selector.key_expr) + .intersects(selector.key_expr()) { responses.push(zenoh::plugins::Response::new( key.clone(), @@ -329,7 +329,7 @@ impl RunningPluginTrait for StorageRuntime { }); if keyexpr::new(key.as_str()) .unwrap() - .intersects(&selector.key_expr) + .intersects(selector.key_expr()) { responses.push(zenoh::plugins::Response::new( key.clone(), @@ -345,7 +345,7 @@ impl RunningPluginTrait for StorageRuntime { with_extended_string(key, &[storage], |key| { if keyexpr::new(key.as_str()) .unwrap() - .intersects(&selector.key_expr) + .intersects(selector.key_expr()) { if let Ok(value) = task::block_on(async { let (tx, rx) = async_std::channel::bounded(1); diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 9f2b645da9..bbc90c0e8f 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -205,8 +205,8 @@ impl<'a> PublicationCache<'a> { // on query, reply with cach content query = quer_recv.recv_async() => { if let Ok(query) = query { - if !query.selector().key_expr.as_str().contains('*') { - if let Some(queue) = cache.get(query.selector().key_expr.as_keyexpr()) { + if !query.selector().key_expr().as_str().contains('*') { + if let Some(queue) = cache.get(query.selector().key_expr().as_keyexpr()) { for sample in queue { if let (Ok(Some(time_range)), Some(timestamp)) = (query.selector().time_range(), sample.timestamp()) { if !time_range.contains(timestamp.get_time().to_system_time()){ @@ -220,7 +220,7 @@ impl<'a> PublicationCache<'a> { } } else { for (key_expr, queue) in cache.iter() { - if query.selector().key_expr.intersects(unsafe{ keyexpr::from_str_unchecked(key_expr) }) { + if query.selector().key_expr().intersects(unsafe{ keyexpr::from_str_unchecked(key_expr) }) { for sample in queue { if let (Ok(Some(time_range)), Some(timestamp)) = (query.selector().time_range(), sample.timestamp()) { if !time_range.contains(timestamp.get_time().to_system_time()){ diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 2a9a38c02c..26f9e09c57 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -65,13 +65,17 @@ use std::{ #[derive(Clone, PartialEq, Eq)] pub struct Selector<'a> { /// The part of this selector identifying which keys should be part of the selection. - pub key_expr: KeyExpr<'a>, - /// the part of this selector identifying which values should be part of the selection. 
+ pub(crate) key_expr: KeyExpr<'a>, + /// The part of this selector identifying which values should be part of the selection. pub(crate) parameters: Cow<'a, str>, } pub const TIME_RANGE_KEY: &str = "_time"; impl<'a> Selector<'a> { + /// Gets the key-expression. + pub fn key_expr(&'a self) -> &KeyExpr<'a> { + &self.key_expr + } /// Gets the parameters as a raw string. pub fn parameters(&self) -> &str { &self.parameters From 94b62f5fc86280d6ac28792837fbaca437de4810 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Sat, 13 Apr 2024 14:45:42 +0200 Subject: [PATCH 215/357] Sorted parameters --- commons/zenoh-collections/src/parameters.rs | 122 +++++++++++++----- commons/zenoh-collections/src/properties.rs | 44 +++---- commons/zenoh-protocol/src/core/endpoint.rs | 94 +++++++++----- io/zenoh-transport/src/multicast/manager.rs | 2 +- io/zenoh-transport/src/unicast/manager.rs | 4 +- io/zenoh-transport/tests/endpoints.rs | 8 +- .../tests/unicast_authenticator.rs | 8 +- io/zenoh-transport/tests/unicast_multilink.rs | 8 +- io/zenoh-transport/tests/unicast_openclose.rs | 8 +- io/zenoh-transport/tests/unicast_time.rs | 8 +- io/zenoh-transport/tests/unicast_transport.rs | 34 +++-- zenoh/src/selector.rs | 93 +++++++------ 12 files changed, 252 insertions(+), 181 deletions(-) diff --git a/commons/zenoh-collections/src/parameters.rs b/commons/zenoh-collections/src/parameters.rs index e86dfa2623..b49ee1a1f9 100644 --- a/commons/zenoh-collections/src/parameters.rs +++ b/commons/zenoh-collections/src/parameters.rs @@ -28,15 +28,10 @@ fn split_once(s: &str, c: char) -> (&str, &str) { } /// Parameters provides an `HashMap<&str, &str>`-like view over a `&str` when `&str` follows the format `a=b;c=d|e;f=g`. -pub struct Parameters; - -impl Parameters { - pub fn iter(s: &str) -> impl DoubleEndedIterator + Clone { - s.split(LIST_SEPARATOR) - .filter(|p| !p.is_empty()) - .map(|p| split_once(p, FIELD_SEPARATOR)) - } +/// [`SortedParameters`] it's like [`Parameters`] but with the guarantee that keys are sorted upon insertion. 
+pub struct SortedParameters; +impl SortedParameters { #[allow(clippy::should_implement_trait)] pub fn from_iter<'s, I>(iter: I) -> String where @@ -53,18 +48,85 @@ impl Parameters { { let mut from = iter.collect::>(); from.sort_unstable_by(|(k1, _), (k2, _)| k1.cmp(k2)); - Self::concat_into(from.iter().copied(), into); + Parameters::from_iter_into(from.iter().copied(), into); + } + + pub fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) + where + I: Iterator + Clone, + { + let mut ic = iter.clone(); + let item = ic.find(|(key, _)| *key == k).map(|(_, v)| v); + + let current = iter.filter(|x| x.0 != k); + let new = Some((k, v)).into_iter(); + let iter = current.chain(new); + (SortedParameters::from_iter(iter), item) + } + + pub fn join<'s, C, N>(current: C, new: N) -> String + where + C: Iterator + Clone, + N: Iterator + Clone, + { + let mut into = String::new(); + SortedParameters::join_into(current, new, &mut into); + into + } + + pub fn join_into<'s, C, N>(current: C, new: N, into: &mut String) + where + C: Iterator + Clone, + N: Iterator + Clone, + { + let n = new.clone(); + let current = current + .clone() + .filter(|(kc, _)| !n.clone().any(|(kn, _)| kn == *kc)); + let iter = current.chain(new); + SortedParameters::from_iter_into(iter, into); + } + + pub fn is_sorted<'s, I>(iter: I) -> bool + where + I: Iterator, + { + let mut prev = None; + for (k, _) in iter { + match prev.take() { + Some(p) if k < p => return false, + _ => prev = Some(k), + } + } + true + } +} + +/// Parameters provides an `HashMap<&str, &str>`-like view over a `&str` when `&str` follows the format `a=b;c=d|e;f=g`. +pub struct Parameters; + +impl Parameters { + pub fn iter(s: &str) -> impl DoubleEndedIterator + Clone { + s.split(LIST_SEPARATOR) + .filter(|p| !p.is_empty()) + .map(|p| split_once(p, FIELD_SEPARATOR)) } - pub fn from_slice_mut(slice: &mut [(&str, &str)]) -> String { + #[allow(clippy::should_implement_trait)] + pub fn from_iter<'s, I>(iter: I) -> String + where + I: Iterator, + { let mut into = String::new(); - Self::from_slice_mut_into(slice, &mut into); + Self::from_iter_into(iter, &mut into); into } - pub fn from_slice_mut_into(slice: &mut [(&str, &str)], into: &mut String) { - slice.sort_unstable_by(|(k1, _), (k2, _)| k1.cmp(k2)); - Self::concat_into(slice.iter().copied(), into); + pub fn from_iter_into<'s, I>(iter: I, into: &mut String) + where + I: Iterator, + { + Self::concat_into(iter, into); } pub fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> { @@ -106,39 +168,29 @@ impl Parameters { (Parameters::concat(iter), item) } - pub fn extend<'s, C, N>(current: C, new: N) -> String + pub fn join<'s, C, N>(current: C, new: N) -> String where - C: Iterator, - N: Iterator, + C: Iterator + Clone, + N: Iterator + Clone, { let mut into = String::new(); - Parameters::extend_into(current, new, &mut into); + Parameters::join_into(current, new, &mut into); into } - pub fn extend_into<'s, C, N>(current: C, new: N, into: &mut String) + pub fn join_into<'s, C, N>(current: C, new: N, into: &mut String) where - C: Iterator, - N: Iterator, + C: Iterator + Clone, + N: Iterator + Clone, { + let n = new.clone(); + let current = current + .clone() + .filter(|(kc, _)| !n.clone().any(|(kn, _)| kn == *kc)); let iter = current.chain(new); Parameters::from_iter_into(iter, into); } - pub fn is_sorted<'s, I>(iter: I) -> bool - where - I: Iterator, - { - let mut prev = None; - for (k, _) in iter { - match prev.take() { - Some(p) if k < p => return false, - _ => prev = Some(k), - } - } - true - } 
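
After the split above, `Parameters` stays an order-preserving view while `SortedParameters` produces the canonical key-sorted form that `EndPoint` relies on further below. A hedged sketch of the difference; it is not part of the patch and the literal inputs are illustrative.

    use zenoh_collections::{Parameters, SortedParameters};

    fn canonicalize() {
        // Plain `Parameters` keeps the input order...
        let raw = Parameters::from_iter([("c", "3"), ("a", "1")].iter().copied());
        assert_eq!(raw, "c=3;a=1");

        // ...while `SortedParameters` yields the key-sorted form.
        let sorted = SortedParameters::from_iter([("c", "3"), ("a", "1")].iter().copied());
        assert_eq!(sorted, "a=1;c=3");

        // `join` merges two lists, letting the second override duplicate keys.
        let merged = SortedParameters::join(
            Parameters::iter("a=1;b=2"),
            Parameters::iter("b=20;d=4"),
        );
        assert_eq!(merged, "a=1;b=20;d=4");
    }
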
- fn concat<'s, I>(iter: I) -> String where I: Iterator, diff --git a/commons/zenoh-collections/src/properties.rs b/commons/zenoh-collections/src/properties.rs index 9b0619223c..800d2ed9dc 100644 --- a/commons/zenoh-collections/src/properties.rs +++ b/commons/zenoh-collections/src/properties.rs @@ -79,15 +79,15 @@ impl Properties<'_> { removed } - pub fn extend<'s, I, K, V>(&mut self, iter: I) + pub fn join<'s, I, K, V>(&mut self, iter: I) where - I: IntoIterator, - K: Borrow + 's, - V: Borrow + 's, + I: Iterator + Clone, + K: Borrow + 's + ?Sized, + V: Borrow + 's + ?Sized, { - self.0 = Cow::Owned(Parameters::extend( + self.0 = Cow::Owned(Parameters::join( Parameters::iter(self.as_str()), - iter.into_iter().map(|(k, v)| (k.borrow(), v.borrow())), + iter.map(|(k, v)| (k.borrow(), v.borrow())), )); } @@ -98,30 +98,20 @@ impl Properties<'_> { impl<'s> From<&'s str> for Properties<'s> { fn from(mut value: &'s str) -> Self { - if Parameters::is_sorted(Parameters::iter(value)) { - value = value.trim_end_matches(|c| { - c == LIST_SEPARATOR || c == FIELD_SEPARATOR || c == VALUE_SEPARATOR - }); - Self(Cow::Borrowed(value)) - } else { - Self(Cow::Owned(Parameters::from_iter(Parameters::iter(value)))) - } + value = value.trim_end_matches(|c| { + c == LIST_SEPARATOR || c == FIELD_SEPARATOR || c == VALUE_SEPARATOR + }); + Self(Cow::Borrowed(value)) } } impl From for Properties<'_> { fn from(mut value: String) -> Self { - if Parameters::is_sorted(Parameters::iter(value.as_str())) { - let s = value.trim_end_matches(|c| { - c == LIST_SEPARATOR || c == FIELD_SEPARATOR || c == VALUE_SEPARATOR - }); - value.truncate(s.len()); - Self(Cow::Owned(value)) - } else { - Self(Cow::Owned(Parameters::from_iter(Parameters::iter( - value.as_str(), - )))) - } + let s = value.trim_end_matches(|c| { + c == LIST_SEPARATOR || c == FIELD_SEPARATOR || c == VALUE_SEPARATOR + }); + value.truncate(s.len()); + Self(Cow::Owned(value)) } } @@ -136,8 +126,8 @@ impl<'s> From> for Properties<'s> { impl<'s, K, V> FromIterator<(&'s K, &'s V)> for Properties<'_> where - K: Borrow + 's, - V: Borrow + 's, + K: Borrow + 's + ?Sized, + V: Borrow + 's + ?Sized, { fn from_iter>(iter: T) -> Self { let inner = Parameters::from_iter(iter.into_iter().map(|(k, v)| (k.borrow(), v.borrow()))); diff --git a/commons/zenoh-protocol/src/core/endpoint.rs b/commons/zenoh-protocol/src/core/endpoint.rs index 03678fb675..debe7da7b5 100644 --- a/commons/zenoh-protocol/src/core/endpoint.rs +++ b/commons/zenoh-protocol/src/core/endpoint.rs @@ -13,8 +13,8 @@ // use super::locator::*; use alloc::{borrow::ToOwned, format, string::String}; -use core::{convert::TryFrom, fmt, str::FromStr}; -use zenoh_collections::Parameters; +use core::{borrow::Borrow, convert::TryFrom, fmt, str::FromStr}; +use zenoh_collections::{Parameters, SortedParameters}; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; // Parsing chars @@ -240,25 +240,35 @@ impl<'a> MetadataMut<'a> { } impl MetadataMut<'_> { - pub fn extend(&mut self, iter: I) -> ZResult<()> + pub fn join<'s, I, K, V>(&mut self, iter: I) -> ZResult<()> where - I: Iterator, - K: AsRef, - V: AsRef, + I: Iterator + Clone, + K: Borrow + 's + ?Sized, + V: Borrow + 's + ?Sized, { - for (k, v) in iter { - let k: &str = k.as_ref(); - let v: &str = v.as_ref(); - self.insert(k, v)? 
- } + let ep = EndPoint::new( + self.0.protocol(), + self.0.address(), + SortedParameters::join( + self.0.metadata().iter(), + iter.map(|(k, v)| (k.borrow(), v.borrow())), + ), + self.0.config(), + )?; + + self.0.inner = ep.inner; Ok(()) } - pub fn insert(&mut self, k: &str, v: &str) -> ZResult<()> { + pub fn insert(&mut self, k: K, v: V) -> ZResult<()> + where + K: Borrow, + V: Borrow, + { let ep = EndPoint::new( self.0.protocol(), self.0.address(), - Parameters::insert(self.0.metadata().iter(), k, v).0, + SortedParameters::insert(self.0.metadata().iter(), k.borrow(), v.borrow()).0, self.0.config(), )?; @@ -266,11 +276,14 @@ impl MetadataMut<'_> { Ok(()) } - pub fn remove(&mut self, k: &str) -> ZResult<()> { + pub fn remove(&mut self, k: K) -> ZResult<()> + where + K: Borrow, + { let ep = EndPoint::new( self.0.protocol(), self.0.address(), - Parameters::remove(self.0.metadata().iter(), k).0, + Parameters::remove(self.0.metadata().iter(), k.borrow()).0, self.0.config(), )?; @@ -357,38 +370,51 @@ impl<'a> ConfigMut<'a> { } impl ConfigMut<'_> { - pub fn extend(&mut self, iter: I) -> ZResult<()> + pub fn join<'s, I, K, V>(&mut self, iter: I) -> ZResult<()> where - I: Iterator, - K: AsRef, - V: AsRef, + I: Iterator + Clone, + K: Borrow + 's + ?Sized, + V: Borrow + 's + ?Sized, { - for (k, v) in iter { - let k: &str = k.as_ref(); - let v: &str = v.as_ref(); - self.insert(k, v)? - } + let ep = EndPoint::new( + self.0.protocol(), + self.0.address(), + self.0.metadata(), + SortedParameters::join( + self.0.config().iter(), + iter.map(|(k, v)| (k.borrow(), v.borrow())), + ), + )?; + + self.0.inner = ep.inner; Ok(()) } - pub fn insert(&mut self, k: &str, v: &str) -> ZResult<()> { + pub fn insert(&mut self, k: K, v: V) -> ZResult<()> + where + K: Borrow, + V: Borrow, + { let ep = EndPoint::new( self.0.protocol(), self.0.address(), self.0.metadata(), - Parameters::insert(self.0.config().iter(), k, v).0, + SortedParameters::insert(self.0.config().iter(), k.borrow(), v.borrow()).0, )?; self.0.inner = ep.inner; Ok(()) } - pub fn remove(&mut self, k: &str) -> ZResult<()> { + pub fn remove(&mut self, k: K) -> ZResult<()> + where + K: Borrow, + { let ep = EndPoint::new( self.0.protocol(), self.0.address(), self.0.metadata(), - Parameters::remove(self.0.config().iter(), k).0, + Parameters::remove(self.0.config().iter(), k.borrow()).0, )?; self.0.inner = ep.inner; @@ -550,14 +576,14 @@ impl TryFrom for EndPoint { (Some(midx), None) if midx > pidx && !s[midx + 1..].is_empty() => { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..midx + 1]); // Includes metadata separator - Parameters::from_iter_into(Parameters::iter(&s[midx + 1..]), &mut inner); + SortedParameters::from_iter_into(Parameters::iter(&s[midx + 1..]), &mut inner); Ok(EndPoint { inner }) } // There is some config (None, Some(cidx)) if cidx > pidx && !s[cidx + 1..].is_empty() => { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..cidx + 1]); // Includes config separator - Parameters::from_iter_into(Parameters::iter(&s[cidx + 1..]), &mut inner); + SortedParameters::from_iter_into(Parameters::iter(&s[cidx + 1..]), &mut inner); Ok(EndPoint { inner }) } // There is some metadata and some config @@ -570,10 +596,10 @@ impl TryFrom for EndPoint { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..midx + 1]); // Includes metadata separator - Parameters::from_iter_into(Parameters::iter(&s[midx + 1..cidx]), &mut inner); + SortedParameters::from_iter_into(Parameters::iter(&s[midx + 1..cidx]), &mut inner); 
inner.push(CONFIG_SEPARATOR); - Parameters::from_iter_into(Parameters::iter(&s[cidx + 1..]), &mut inner); + SortedParameters::from_iter_into(Parameters::iter(&s[cidx + 1..]), &mut inner); Ok(EndPoint { inner }) } @@ -792,14 +818,14 @@ fn endpoints() { let mut endpoint = EndPoint::from_str("udp/127.0.0.1:7447").unwrap(); endpoint .metadata_mut() - .extend([("a", "1"), ("c", "3"), ("b", "2")].iter().copied()) + .join([("a", "1"), ("c", "3"), ("b", "2")].iter().copied()) .unwrap(); assert_eq!(endpoint.as_str(), "udp/127.0.0.1:7447?a=1;b=2;c=3"); let mut endpoint = EndPoint::from_str("udp/127.0.0.1:7447").unwrap(); endpoint .config_mut() - .extend([("A", "1"), ("C", "3"), ("B", "2")].iter().copied()) + .join([("A", "1"), ("C", "3"), ("B", "2")].iter().copied()) .unwrap(); assert_eq!(endpoint.as_str(), "udp/127.0.0.1:7447#A=1;B=2;C=3"); diff --git a/io/zenoh-transport/src/multicast/manager.rs b/io/zenoh-transport/src/multicast/manager.rs index daf49ce9a3..a6f682edc9 100644 --- a/io/zenoh-transport/src/multicast/manager.rs +++ b/io/zenoh-transport/src/multicast/manager.rs @@ -260,7 +260,7 @@ impl TransportManager { .await?; // Fill and merge the endpoint configuration if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { - endpoint.config_mut().extend(Parameters::iter(config))?; + endpoint.config_mut().join(Parameters::iter(config))?; } // Open the link diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index eb0339c35b..0fdce265f9 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -380,7 +380,7 @@ impl TransportManager { .await?; // Fill and merge the endpoint configuration if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { - endpoint.config_mut().extend(Parameters::iter(config))?; + endpoint.config_mut().join(Parameters::iter(config))?; }; manager.new_listener(endpoint).await } @@ -689,7 +689,7 @@ impl TransportManager { .await?; // Fill and merge the endpoint configuration if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { - endpoint.config_mut().extend(Parameters::iter(config))?; + endpoint.config_mut().join(Parameters::iter(config))?; }; // Create a new link associated by calling the Link Manager diff --git a/io/zenoh-transport/tests/endpoints.rs b/io/zenoh-transport/tests/endpoints.rs index 13a605a588..def493e88f 100644 --- a/io/zenoh-transport/tests/endpoints.rs +++ b/io/zenoh-transport/tests/endpoints.rs @@ -317,13 +317,13 @@ AXVFFIgCSluyrolaD6CWD9MqOex4YOfJR2bNxI7lFvuK4AwjyUJzT1U1HXib17mM let mut endpoint: EndPoint = format!("tls/localhost:{}", 7070).parse().unwrap(); endpoint .config_mut() - .extend( + .join( [ (TLS_SERVER_CERTIFICATE_RAW, cert), (TLS_SERVER_PRIVATE_KEY_RAW, key), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); @@ -396,13 +396,13 @@ AXVFFIgCSluyrolaD6CWD9MqOex4YOfJR2bNxI7lFvuK4AwjyUJzT1U1HXib17mM let mut endpoint: EndPoint = format!("quic/localhost:{}", 7080).parse().unwrap(); endpoint .config_mut() - .extend( + .join( [ (TLS_SERVER_CERTIFICATE_RAW, cert), (TLS_SERVER_PRIVATE_KEY_RAW, key), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); let endpoints = vec![endpoint]; diff --git a/io/zenoh-transport/tests/unicast_authenticator.rs b/io/zenoh-transport/tests/unicast_authenticator.rs index d94ade1ce1..63f1c785b7 100644 --- a/io/zenoh-transport/tests/unicast_authenticator.rs +++ 
b/io/zenoh-transport/tests/unicast_authenticator.rs @@ -802,14 +802,14 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("tls/localhost:{}", 8030).parse().unwrap(); endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_CERTIFICATE_RAW, cert), (TLS_SERVER_PRIVATE_KEY_RAW, key), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); @@ -902,14 +902,14 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("quic/localhost:{}", 8040).parse().unwrap(); endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_CERTIFICATE_RAW, cert), (TLS_SERVER_PRIVATE_KEY_RAW, key), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); diff --git a/io/zenoh-transport/tests/unicast_multilink.rs b/io/zenoh-transport/tests/unicast_multilink.rs index 5e4499be2a..54a31f62c3 100644 --- a/io/zenoh-transport/tests/unicast_multilink.rs +++ b/io/zenoh-transport/tests/unicast_multilink.rs @@ -611,14 +611,14 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("tls/localhost:{}", 18030).parse().unwrap(); endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), (TLS_SERVER_CERTIFICATE_RAW, cert), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); @@ -709,14 +709,14 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("quic/localhost:{}", 18040).parse().unwrap(); endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), (TLS_SERVER_CERTIFICATE_RAW, cert), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); diff --git a/io/zenoh-transport/tests/unicast_openclose.rs b/io/zenoh-transport/tests/unicast_openclose.rs index 56e4a1b140..3f57ebfd62 100644 --- a/io/zenoh-transport/tests/unicast_openclose.rs +++ b/io/zenoh-transport/tests/unicast_openclose.rs @@ -639,14 +639,14 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("tls/localhost:{}", 13030).parse().unwrap(); endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), (TLS_SERVER_CERTIFICATE_RAW, cert), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); @@ -737,14 +737,14 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("quic/localhost:{}", 13040).parse().unwrap(); endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), (TLS_SERVER_CERTIFICATE_RAW, cert), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); diff --git a/io/zenoh-transport/tests/unicast_time.rs b/io/zenoh-transport/tests/unicast_time.rs index 75d3ae1d98..668df34cd6 100644 --- a/io/zenoh-transport/tests/unicast_time.rs +++ b/io/zenoh-transport/tests/unicast_time.rs @@ -398,14 +398,14 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("tls/localhost:{}", 13030).parse().unwrap(); endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), (TLS_SERVER_CERTIFICATE_RAW, cert), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); @@ -497,14 +497,14 @@ 
R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("quic/localhost:{}", 13040).parse().unwrap(); endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), (TLS_SERVER_CERTIFICATE_RAW, cert), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index c0af98eb46..2fffb2f811 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -994,14 +994,14 @@ async fn transport_unicast_tls_only_server() { let mut endpoint: EndPoint = format!("tls/localhost:{}", 16070).parse().unwrap(); endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), (TLS_SERVER_PRIVATE_KEY_RAW, SERVER_KEY), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); @@ -1039,14 +1039,14 @@ async fn transport_unicast_quic_only_server() { let mut endpoint: EndPoint = format!("quic/localhost:{}", 16080).parse().unwrap(); endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), (TLS_SERVER_PRIVATE_KEY_RAW, SERVER_KEY), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); @@ -1087,7 +1087,7 @@ async fn transport_unicast_tls_only_mutual_success() { let mut client_endpoint: EndPoint = ("tls/localhost:10461").parse().unwrap(); client_endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA), (TLS_CLIENT_CERTIFICATE_RAW, CLIENT_CERT), @@ -1095,7 +1095,7 @@ async fn transport_unicast_tls_only_mutual_success() { (TLS_CLIENT_AUTH, client_auth), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); @@ -1103,7 +1103,7 @@ async fn transport_unicast_tls_only_mutual_success() { let mut server_endpoint: EndPoint = ("tls/localhost:10461").parse().unwrap(); server_endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, CLIENT_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), @@ -1111,7 +1111,7 @@ async fn transport_unicast_tls_only_mutual_success() { (TLS_CLIENT_AUTH, client_auth), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); // Define the reliability and congestion control @@ -1157,18 +1157,14 @@ async fn transport_unicast_tls_only_mutual_no_client_certs_failure() { let mut client_endpoint: EndPoint = ("tls/localhost:10462").parse().unwrap(); client_endpoint .config_mut() - .extend( - [(TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA)] - .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), - ) + .join([(TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA)].iter().copied()) .unwrap(); // Define the locator let mut server_endpoint: EndPoint = ("tls/localhost:10462").parse().unwrap(); server_endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, CLIENT_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), @@ -1176,7 +1172,7 @@ async fn transport_unicast_tls_only_mutual_no_client_certs_failure() { (TLS_CLIENT_AUTH, "true"), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); // Define the reliability and congestion control @@ -1227,7 +1223,7 @@ fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { let mut client_endpoint: EndPoint = ("tls/localhost:10463").parse().unwrap(); 
client_endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA), // Using the SERVER_CERT and SERVER_KEY in the client to simulate the case the client has @@ -1239,7 +1235,7 @@ fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { (TLS_CLIENT_AUTH, client_auth), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); @@ -1247,7 +1243,7 @@ fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { let mut server_endpoint: EndPoint = ("tls/localhost:10463").parse().unwrap(); server_endpoint .config_mut() - .extend( + .join( [ (TLS_ROOT_CA_CERTIFICATE_RAW, CLIENT_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), @@ -1255,7 +1251,7 @@ fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { (TLS_CLIENT_AUTH, client_auth), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); // Define the reliability and congestion control diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 7d5326638e..51ee72f98a 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -233,49 +233,6 @@ impl Parameters<'_> { } } } -#[test] -fn selector_accessors() { - let time_range = "[now(-2s)..now(2s)]".parse().unwrap(); - for selector in [ - "hello/there?_timetrick", - "hello/there?_timetrick;_time", - "hello/there?_timetrick;_time;_filter", - "hello/there?_timetrick;_time=[..]", - "hello/there?_timetrick;_time=[..];_filter", - ] { - let mut selector = Selector::try_from(selector).unwrap(); - println!("Parameters start: {}", selector.parameters()); - for i in selector.parameters().iter() { - println!("\t{:?}", i); - } - - assert_eq!(selector.parameters().get("_timetrick").unwrap(), ""); - - selector.set_time_range(time_range); - assert_eq!(selector.time_range().unwrap().unwrap(), time_range); - assert!(selector.parameters().contains_key(TIME_RANGE_KEY)); - - let hm: HashMap = HashMap::from(selector.parameters()); - assert!(hm.contains_key(TIME_RANGE_KEY)); - - let hm: HashMap<&str, &str> = HashMap::from(selector.parameters()); - assert!(hm.contains_key(TIME_RANGE_KEY)); - - selector.parameters_mut().insert("_filter", ""); - assert_eq!(selector.parameters().get("_filter").unwrap(), ""); - - selector.set_accept_any_keyexpr(true); - - println!("Parameters end: {}", selector.parameters()); - for i in selector.parameters().iter() { - println!("\t{:?}", i); - } - assert_eq!( - &format!("{}", selector), - "hello/there?_anyke;_filter;_time=[now(-2s)..now(2s)];_timetrick" - ); - } -} impl std::fmt::Debug for Selector<'_> { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { @@ -392,3 +349,53 @@ impl<'a> From> for Selector<'a> { } } } + +#[test] +fn selector_accessors() { + let time_range = "[now(-2s)..now(2s)]".parse().unwrap(); + for selector in [ + "hello/there?_timetrick", + "hello/there?_timetrick;_time", + "hello/there?_timetrick;_time;_filter", + "hello/there?_timetrick;_time=[..]", + "hello/there?_timetrick;_time=[..];_filter", + ] { + let mut selector = Selector::try_from(selector).unwrap(); + println!("Parameters start: {}", selector.parameters()); + for i in selector.parameters().iter() { + println!("\t{:?}", i); + } + + assert_eq!(selector.parameters().get("_timetrick").unwrap(), ""); + + selector.set_time_range(time_range); + assert_eq!(selector.time_range().unwrap().unwrap(), time_range); + assert!(selector.parameters().contains_key(TIME_RANGE_KEY)); + + let hm: HashMap<&str, &str> = HashMap::from(selector.parameters()); + 
assert!(hm.contains_key(TIME_RANGE_KEY)); + + selector.parameters_mut().insert("_filter", ""); + assert_eq!(selector.parameters().get("_filter").unwrap(), ""); + + let hm: HashMap = HashMap::from(selector.parameters()); + assert!(hm.contains_key(TIME_RANGE_KEY)); + + selector.parameters_mut().join(hm.iter()); + assert_eq!(selector.parameters().get("_filter").unwrap(), ""); + + selector.set_accept_any_keyexpr(true); + + println!("Parameters end: {}", selector.parameters()); + for i in selector.parameters().iter() { + println!("\t{:?}", i); + } + + assert_eq!( + HashMap::::from(selector.parameters()), + HashMap::::from(Parameters::from( + "_anyke;_filter;_time=[now(-2s)..now(2s)];_timetrick" + )) + ); + } +} From f79033d596eddad3fd2e5dbc61d3843998bbf1ba Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Sat, 13 Apr 2024 15:57:03 +0200 Subject: [PATCH 216/357] Fix conditional feature --- zenoh/src/session.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index beaecba314..e8b6660b9b 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -2187,9 +2187,12 @@ impl Primitives for Session { }; match state.queries.get_mut(&msg.rid) { Some(query) => { - if !matches!(query.selector.accept_any_keyexpr(), Ok(Some(true))) - && !query.selector.key_expr.intersects(&key_expr) - { + let c = zcondfeat!( + "unstable", + !matches!(query.selector.accept_any_keyexpr(), Ok(Some(true))), + true + ); + if c && !query.selector.key_expr.intersects(&key_expr) { log::warn!( "Received Reply for `{}` from `{:?}, which didn't match query `{}`: dropping Reply.", key_expr, From f77e1b7aca2c657142986d19695491bf861f7b3d Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Sat, 13 Apr 2024 16:26:02 +0200 Subject: [PATCH 217/357] Fix conditional feature --- zenoh/src/queryable.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 36a2c51ba0..26be68ac8f 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -220,6 +220,7 @@ impl Query { } }) } + #[cfg(feature = "unstable")] fn _accepts_any_replies(&self) -> ZResult { Ok(self.parameters().accept_any_keyexpr()?.unwrap_or(false)) } @@ -407,9 +408,12 @@ impl SyncResolve for ReplyBuilder<'_, '_, ReplyBuilderDelete> { impl Query { fn _reply_sample(&self, sample: Sample) -> ZResult<()> { - if !self._accepts_any_replies().unwrap_or(false) - && !self.key_expr().intersects(&sample.key_expr) - { + let c = zcondfeat!( + "unstable", + !self._accepts_any_replies().unwrap_or(false), + true + ); + if c && !self.key_expr().intersects(&sample.key_expr) { bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", sample.key_expr, self.key_expr()) } #[cfg(not(feature = "unstable"))] From 418ca2be575969a1e0a4b02da1ee1c0cb9cedef2 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Sun, 14 Apr 2024 18:09:53 +0200 Subject: [PATCH 218/357] errno added --- zenoh/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index c0840c8829..65048a2d37 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -121,6 +121,7 @@ pub mod core { pub use zenoh_result::Error; /// A zenoh result. 
pub use zenoh_result::ZResult as Result; + pub use zenoh_util::core::zresult::ErrNo; } /// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate @@ -348,10 +349,10 @@ pub mod internal { pub use zenoh_macros::unstable; pub use zenoh_result::bail; pub use zenoh_sync::Condition; + pub use zenoh_task::TerminatableTask; pub use zenoh_util::core::ResolveFuture; pub use zenoh_util::LibLoader; pub use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer, ZENOH_HOME_ENV_VAR}; - pub use zenoh_task::TerminatableTask; } #[cfg(feature = "shared-memory")] From f321cda42672885176ace0e58ddb749066cf72da Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 15 Apr 2024 12:55:50 +0200 Subject: [PATCH 219/357] compilation fixes --- zenoh/src/api/builders/publication.rs | 5 +- zenoh/src/api/builders/sample.rs | 3 +- zenoh/src/api/query.rs | 1 + zenoh/src/api/queryable.rs | 1 + zenoh/src/api/sample.rs | 10 +- zenoh/src/api/scouting.rs | 3 +- zenoh/src/api/value.rs | 2 +- zenoh/src/lib.rs | 143 +------------------------- 8 files changed, 16 insertions(+), 152 deletions(-) diff --git a/zenoh/src/api/builders/publication.rs b/zenoh/src/api/builders/publication.rs index b6ebb0bad2..9a95317488 100644 --- a/zenoh/src/api/builders/publication.rs +++ b/zenoh/src/api/builders/publication.rs @@ -16,6 +16,8 @@ use std::future::Ready; use crate::api::builders::sample::SampleBuilderTrait; use crate::api::builders::sample::{QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait}; use crate::api::key_expr::KeyExpr; +#[cfg(feature = "unstable")] +use crate::api::payload::OptionPayload; use crate::api::publication::Priority; #[cfg(feature = "unstable")] use crate::api::sample::Attachment; @@ -156,7 +158,8 @@ impl SampleBuilderTrait for PublicationBuilder { } } #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: TA) -> Self { + fn attachment>(self, attachment: TA) -> Self { + let attachment: OptionPayload = attachment.into(); Self { attachment: attachment.into(), ..self diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs index 736e982b1c..55a028f687 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -26,8 +26,7 @@ use crate::api::sample::Sample; use crate::api::sample::SampleKind; use crate::api::value::Value; #[cfg(feature = "unstable")] -use crate::{payload::OptionPayload, sample::SourceInfo}; -use std::marker::PhantomData; +use crate::{api::payload::OptionPayload, sample::SourceInfo}; use uhlc::Timestamp; use zenoh_core::zresult; use zenoh_protocol::core::CongestionControl; diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index d2a6d822e4..4152b01283 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -33,6 +33,7 @@ use zenoh_result::ZResult; #[zenoh_macros::unstable] use super::{ builders::sample::SampleBuilderTrait, + payload::OptionPayload, sample::{Attachment, SourceInfo}, }; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 48e6955097..c8c344b074 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -40,6 +40,7 @@ use zenoh_result::ZResult; use { super::{ builders::sample::SampleBuilderTrait, + payload::OptionPayload, query::ReplyKeyExpr, sample::{Attachment, SourceInfo}, }, diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 26ff586c59..d9ffb363dd 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -13,12 +13,8 @@ // use super::{ - builders::sample::{QoSBuilderTrait, ValueBuilderTrait}, - 
encoding::Encoding, - key_expr::KeyExpr, - payload::Payload, - publication::Priority, - value::Value, + builders::sample::QoSBuilderTrait, encoding::Encoding, key_expr::KeyExpr, payload::Payload, + publication::Priority, value::Value, }; use std::{convert::TryFrom, fmt}; use zenoh_protocol::{ @@ -27,7 +23,7 @@ use zenoh_protocol::{ }; #[zenoh_macros::unstable] -pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; +pub use attachment::Attachment; #[zenoh_macros::unstable] use serde::Serialize; diff --git a/zenoh/src/api/scouting.rs b/zenoh/src/api/scouting.rs index 058ab82058..5aa456ae29 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -373,6 +373,7 @@ where ScoutBuilder { what: what.into(), config: config.try_into().map_err(|e| e.into()), - handler: DefaultHandler, + handler: DefaultHandler::default(), } } + diff --git a/zenoh/src/api/value.rs b/zenoh/src/api/value.rs index 06d923b7d0..9938015d78 100644 --- a/zenoh/src/api/value.rs +++ b/zenoh/src/api/value.rs @@ -13,7 +13,7 @@ // //! Value primitives. -use super::{builders::sample::ValueBuilderTrait, encoding::Encoding, payload::Payload}; +use super::{encoding::Encoding, payload::Payload}; /// A zenoh [`Value`] contains a `payload` and an [`Encoding`] that indicates how the [`Payload`] should be interpreted. #[non_exhaustive] diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index eb51b22dbd..2607a0d2db 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -207,13 +207,13 @@ pub mod sample { pub use crate::api::builders::sample::TimestampBuilderTrait; pub use crate::api::builders::sample::ValueBuilderTrait; #[zenoh_macros::unstable] + pub use crate::api::sample::Attachment; + #[zenoh_macros::unstable] pub use crate::api::sample::Locality; pub use crate::api::sample::Sample; pub use crate::api::sample::SampleKind; #[zenoh_macros::unstable] pub use crate::api::sample::SourceInfo; - #[zenoh_macros::unstable] - pub use crate::api::sample::{Attachment, AttachmentBuilder, AttachmentIterator}; } /// Value primitives @@ -290,7 +290,7 @@ pub mod handlers { pub use crate::api::handlers::locked; pub use crate::api::handlers::DefaultHandler; pub use crate::api::handlers::IntoHandler; - pub use crate::api::handlers::RingBuffer; + pub use crate::api::handlers::RingChannel; } /// Scouting primitives @@ -317,143 +317,6 @@ pub mod time { pub use zenoh_protocol::core::{Timestamp, TimestampId, NTP64}; } -/// Scouting primitives. -pub mod scouting; - -/// Scout for routers and/or peers. -/// -/// [`scout`] spawns a task that periodically sends scout messages and waits for [`Hello`](crate::scouting::Hello) replies. -/// -/// Drop the returned [`Scout`](crate::scouting::Scout) to stop the scouting task. 
-/// -/// # Arguments -/// -/// * `what` - The kind of zenoh process to scout for -/// * `config` - The configuration [`Config`] to use for scouting -/// -/// # Examples -/// ```no_run -/// # #[tokio::main] -/// # async fn main() { -/// use zenoh::prelude::r#async::*; -/// use zenoh::scouting::WhatAmI; -/// -/// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) -/// .res() -/// .await -/// .unwrap(); -/// while let Ok(hello) = receiver.recv_async().await { -/// println!("{}", hello); -/// } -/// # } -/// ``` -pub fn scout, TryIntoConfig>( - what: I, - config: TryIntoConfig, -) -> ScoutBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: - Into, -{ - ScoutBuilder { - what: what.into(), - config: config.try_into().map_err(|e| e.into()), - handler: DefaultHandler::default(), - } -} - -/// Open a zenoh [`Session`]. -/// -/// # Arguments -/// -/// * `config` - The [`Config`] for the zenoh session -/// -/// # Examples -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use zenoh::prelude::r#async::*; -/// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// # } -/// ``` -/// -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use std::str::FromStr; -/// use zenoh::prelude::r#async::*; -/// -/// let mut config = config::peer(); -/// config.set_id(ZenohId::from_str("221b72df20924c15b8794c6bdb471150").unwrap()); -/// config.connect.endpoints.extend("tcp/10.10.10.10:7447,tcp/11.11.11.11:7447".split(',').map(|s|s.parse().unwrap())); -/// -/// let session = zenoh::open(config).res().await.unwrap(); -/// # } -/// ``` -pub fn open(config: TryIntoConfig) -> OpenBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: std::fmt::Debug, -{ - OpenBuilder { config } -} - -/// A builder returned by [`open`] used to open a zenoh [`Session`]. -/// -/// # Examples -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use zenoh::prelude::r#async::*; -/// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// # } -/// ``` -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -pub struct OpenBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: std::fmt::Debug, -{ - config: TryIntoConfig, -} - -impl Resolvable for OpenBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: std::fmt::Debug, -{ - type To = ZResult; -} - -impl SyncResolve for OpenBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: std::fmt::Debug, -{ - fn res_sync(self) -> ::To { - let config: crate::config::Config = self - .config - .try_into() - .map_err(|e| zerror!("Invalid Zenoh configuration {:?}", &e))?; - Session::new(config).res_sync() - } -} - -impl AsyncResolve for OpenBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: std::fmt::Debug, -{ - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - /// Initialize a Session with an existing Runtime. /// This operation is used by the plugins to share the same Runtime as the router. 
#[doc(hidden)] From ecfc6c506b55fcde5cb91b22444cbb4a49c8d252 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 15 Apr 2024 12:57:25 +0200 Subject: [PATCH 220/357] compilation fixes --- zenoh/src/handlers/mod.rs | 2 +- zenoh/src/handlers/ring.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/zenoh/src/handlers/mod.rs b/zenoh/src/handlers/mod.rs index 627c166795..289af7f1cc 100644 --- a/zenoh/src/handlers/mod.rs +++ b/zenoh/src/handlers/mod.rs @@ -21,7 +21,7 @@ pub use callback::*; pub use fifo::*; pub use ring::*; -use crate::API_DATA_RECEPTION_CHANNEL_SIZE; +use crate::api::session::API_DATA_RECEPTION_CHANNEL_SIZE; /// An alias for `Arc`. pub type Dyn = std::sync::Arc; diff --git a/zenoh/src/handlers/ring.rs b/zenoh/src/handlers/ring.rs index 341a3efadd..36e4fb53a0 100644 --- a/zenoh/src/handlers/ring.rs +++ b/zenoh/src/handlers/ring.rs @@ -13,7 +13,7 @@ // //! Callback handler trait. -use crate::API_DATA_RECEPTION_CHANNEL_SIZE; +use crate::api::session::API_DATA_RECEPTION_CHANNEL_SIZE; use super::{callback::Callback, Dyn, IntoHandler}; use std::sync::{Arc, Weak}; From c36a0fa1279b8b94190ae726132d3cdfa3a78adb Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 15 Apr 2024 12:58:31 +0200 Subject: [PATCH 221/357] compilation fixes --- zenoh/src/{ => api}/handlers/callback.rs | 0 zenoh/src/{ => api}/handlers/fifo.rs | 0 zenoh/src/{ => api}/handlers/mod.rs | 0 zenoh/src/{ => api}/handlers/ring.rs | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename zenoh/src/{ => api}/handlers/callback.rs (100%) rename zenoh/src/{ => api}/handlers/fifo.rs (100%) rename zenoh/src/{ => api}/handlers/mod.rs (100%) rename zenoh/src/{ => api}/handlers/ring.rs (100%) diff --git a/zenoh/src/handlers/callback.rs b/zenoh/src/api/handlers/callback.rs similarity index 100% rename from zenoh/src/handlers/callback.rs rename to zenoh/src/api/handlers/callback.rs diff --git a/zenoh/src/handlers/fifo.rs b/zenoh/src/api/handlers/fifo.rs similarity index 100% rename from zenoh/src/handlers/fifo.rs rename to zenoh/src/api/handlers/fifo.rs diff --git a/zenoh/src/handlers/mod.rs b/zenoh/src/api/handlers/mod.rs similarity index 100% rename from zenoh/src/handlers/mod.rs rename to zenoh/src/api/handlers/mod.rs diff --git a/zenoh/src/handlers/ring.rs b/zenoh/src/api/handlers/ring.rs similarity index 100% rename from zenoh/src/handlers/ring.rs rename to zenoh/src/api/handlers/ring.rs From ae4c4934f4df9b2e305153594d3f6ee5ad0437ac Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 14:08:52 +0200 Subject: [PATCH 222/357] Selector time_range API is unstable --- zenoh/src/selector.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 51ee72f98a..1598975e1c 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -96,11 +96,13 @@ impl<'a> Selector<'a> { (self.key_expr, self.parameters) } + #[zenoh_macros::unstable] /// Sets the time range targeted by the selector. pub fn set_time_range>>(&mut self, time_range: T) { self.parameters_mut().set_time_range(time_range); } + #[zenoh_macros::unstable] /// Sets the `parameters` part of this `Selector`. #[inline(always)] pub fn with_parameters(mut self, parameters: &'a str) -> Self { @@ -108,6 +110,7 @@ impl<'a> Selector<'a> { self } + #[zenoh_macros::unstable] /// Extracts the standardized `_time` argument from the selector parameters. 
/// /// The default implementation still causes a complete pass through the selector parameters to ensure that there are no duplicates of the `_time` key. From f1eb8a5480b5bc0cfeb8224a75e65e779c234df1 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 14:10:34 +0200 Subject: [PATCH 223/357] Selector time_range API is unstable --- zenoh/src/selector.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 1598975e1c..1fc91fbc6a 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -190,6 +190,7 @@ impl From> for HashMap { } impl Parameters<'_> { + #[zenoh_macros::unstable] /// Sets the time range targeted by the selector. pub fn set_time_range>>(&mut self, time_range: T) { let mut time_range: Option = time_range.into(); @@ -199,6 +200,7 @@ impl Parameters<'_> { }; } + #[zenoh_macros::unstable] /// Extracts the standardized `_time` argument from the selector parameters. /// /// The default implementation still causes a complete pass through the selector parameters to ensure that there are no duplicates of the `_time` key. From 2992ea13e28b1c97c53bc90b1799c0f48720d020 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 14:55:06 +0200 Subject: [PATCH 224/357] Improve Properties docs --- commons/zenoh-collections/src/properties.rs | 40 +++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/commons/zenoh-collections/src/properties.rs b/commons/zenoh-collections/src/properties.rs index 800d2ed9dc..6ce36b9c53 100644 --- a/commons/zenoh-collections/src/properties.rs +++ b/commons/zenoh-collections/src/properties.rs @@ -20,19 +20,50 @@ use std::collections::HashMap; /// A map of key/value (String,String) properties. /// It can be parsed from a String, using `;` or `` as separator between each properties /// and `=` as separator between a key and its value. Keys and values are trimed. +/// +/// Example: +/// ``` +/// use zenoh_collections::Properties; +/// +/// let a = "a=1;b=2;c=3|4|5;d=6"; +/// let p = Properties::from(a); +/// +/// // Retrieve values +/// assert!(!p.is_empty()); +/// assert_eq!(p.get("a").unwrap(), "1"); +/// assert_eq!(p.get("b").unwrap(), "2"); +/// assert_eq!(p.get("c").unwrap(), "3|4|5"); +/// assert_eq!(p.get("d").unwrap(), "6"); +/// assert_eq!(p.values("c").collect::>(), vec!["3", "4", "5"]); +/// +/// // Iterate over properties +/// let mut iter = p.iter(); +/// assert_eq!(iter.next().unwrap(), ("a", "1")); +/// assert_eq!(iter.next().unwrap(), ("b", "2")); +/// assert_eq!(iter.next().unwrap(), ("c", "3|4|5")); +/// assert_eq!(iter.next().unwrap(), ("d", "6")); +/// assert!(iter.next().is_none()); +/// +/// // Create properties from iterators +/// let pi = Properties::from_iter(vec![("a", "1"), ("b", "2"), ("c", "3|4|5"), ("d", "6")]); +/// assert_eq!(p, pi); +/// ``` #[non_exhaustive] #[derive(Clone, PartialEq, Eq, Default)] pub struct Properties<'s>(Cow<'s, str>); impl Properties<'_> { + /// Returns `true` if properties does not contain anything. pub fn is_empty(&self) -> bool { self.0.is_empty() } + /// Returns properties as [`str`]. pub fn as_str(&self) -> &str { &self.0 } + /// Returns `true` if properties contains the specified key. pub fn contains_key(&self, k: K) -> bool where K: Borrow, @@ -40,6 +71,7 @@ impl Properties<'_> { self.get(k).is_some() } + /// Returns a reference to the value corresponding to the key. 
pub fn get(&self, k: K) -> Option<&str> where K: Borrow, @@ -47,6 +79,7 @@ impl Properties<'_> { Parameters::get(self.as_str(), k.borrow()) } + /// Returns an iterator to the values corresponding to the key. pub fn values(&self, k: K) -> impl DoubleEndedIterator where K: Borrow, @@ -54,10 +87,14 @@ impl Properties<'_> { Parameters::values(self.as_str(), k.borrow()) } + /// Returns an iterator on the key-value pairs as `(&str, &str)`. pub fn iter(&self) -> impl DoubleEndedIterator + Clone { Parameters::iter(self.as_str()) } + /// Inserts a key-value pair into the map. + /// If the map did not have this key present, [`None`]` is returned. + /// If the map did have this key present, the value is updated, and the old value is returned. pub fn insert(&mut self, k: K, v: V) -> Option where K: Borrow, @@ -69,6 +106,7 @@ impl Properties<'_> { removed } + /// Removes a key from the map, returning the value at the key if the key was previously in the properties. pub fn remove(&mut self, k: K) -> Option where K: Borrow, @@ -79,6 +117,7 @@ impl Properties<'_> { removed } + /// Join an iterator of key-value pairs `(&str, &str)` into properties. pub fn join<'s, I, K, V>(&mut self, iter: I) where I: Iterator + Clone, @@ -91,6 +130,7 @@ impl Properties<'_> { )); } + /// Convert these properties into owned properties. pub fn into_owned(self) -> Properties<'static> { Properties(Cow::Owned(self.0.into_owned())) } From c87a17285f69f067d2b622facb0e784f13455897 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 16:01:38 +0200 Subject: [PATCH 225/357] Move parameters and properties in zenoh_protocol::core --- Cargo.lock | 2 +- commons/zenoh-collections/src/lib.rs | 8 -------- commons/zenoh-protocol/src/core/endpoint.rs | 6 ++++-- commons/zenoh-protocol/src/core/mod.rs | 6 ++++++ .../src => zenoh-protocol/src/core}/parameters.rs | 0 .../src => zenoh-protocol/src/core}/properties.rs | 2 +- commons/zenoh-runtime/Cargo.toml | 2 +- commons/zenoh-runtime/src/lib.rs | 2 +- io/zenoh-links/zenoh-link-quic/src/lib.rs | 3 +-- io/zenoh-links/zenoh-link-tls/src/lib.rs | 3 +-- io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs | 3 +-- io/zenoh-transport/src/multicast/manager.rs | 7 ++++--- io/zenoh-transport/src/unicast/manager.rs | 3 +-- zenoh/src/payload.rs | 4 ++-- zenoh/src/selector.rs | 6 ++++-- 15 files changed, 28 insertions(+), 29 deletions(-) rename commons/{zenoh-collections/src => zenoh-protocol/src/core}/parameters.rs (100%) rename commons/{zenoh-collections/src => zenoh-protocol/src/core}/properties.rs (99%) diff --git a/Cargo.lock b/Cargo.lock index 07e166c57c..445fe76075 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5030,7 +5030,7 @@ dependencies = [ "futures", "lazy_static", "tokio", - "zenoh-collections", + "zenoh-protocol", "zenoh-result", ] diff --git a/commons/zenoh-collections/src/lib.rs b/commons/zenoh-collections/src/lib.rs index 6690d372da..6549594de2 100644 --- a/commons/zenoh-collections/src/lib.rs +++ b/commons/zenoh-collections/src/lib.rs @@ -20,9 +20,6 @@ #![cfg_attr(not(feature = "std"), no_std)] extern crate alloc; -pub mod parameters; -pub use parameters::*; - pub mod single_or_vec; pub use single_or_vec::*; @@ -35,8 +32,3 @@ pub use ring_buffer::*; pub mod stack_buffer; #[cfg(feature = "std")] pub use stack_buffer::*; - -#[cfg(feature = "std")] -pub mod properties; -#[cfg(feature = "std")] -pub use properties::*; diff --git a/commons/zenoh-protocol/src/core/endpoint.rs b/commons/zenoh-protocol/src/core/endpoint.rs index debe7da7b5..3397147369 100644 --- 
a/commons/zenoh-protocol/src/core/endpoint.rs +++ b/commons/zenoh-protocol/src/core/endpoint.rs @@ -11,10 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::locator::*; +use super::{ + locator::*, + parameters::{Parameters, SortedParameters}, +}; use alloc::{borrow::ToOwned, format, string::String}; use core::{borrow::Borrow, convert::TryFrom, fmt, str::FromStr}; -use zenoh_collections::{Parameters, SortedParameters}; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; // Parsing chars diff --git a/commons/zenoh-protocol/src/core/mod.rs b/commons/zenoh-protocol/src/core/mod.rs index 20fcf85dd9..0920d55d01 100644 --- a/commons/zenoh-protocol/src/core/mod.rs +++ b/commons/zenoh-protocol/src/core/mod.rs @@ -53,6 +53,12 @@ pub use endpoint::*; pub mod resolution; pub use resolution::*; +pub mod parameters; +pub use parameters::*; + +pub mod properties; +pub use properties::*; + /// The global unique id of a zenoh peer. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] #[repr(transparent)] diff --git a/commons/zenoh-collections/src/parameters.rs b/commons/zenoh-protocol/src/core/parameters.rs similarity index 100% rename from commons/zenoh-collections/src/parameters.rs rename to commons/zenoh-protocol/src/core/parameters.rs diff --git a/commons/zenoh-collections/src/properties.rs b/commons/zenoh-protocol/src/core/properties.rs similarity index 99% rename from commons/zenoh-collections/src/properties.rs rename to commons/zenoh-protocol/src/core/properties.rs index 6ce36b9c53..d2f0506d46 100644 --- a/commons/zenoh-collections/src/properties.rs +++ b/commons/zenoh-protocol/src/core/properties.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{Parameters, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR}; +use super::{Parameters, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR}; use alloc::borrow::Cow; use core::{borrow::Borrow, fmt}; #[cfg(feature = "std")] diff --git a/commons/zenoh-runtime/Cargo.toml b/commons/zenoh-runtime/Cargo.toml index e5bd64b8c5..350bed9c26 100644 --- a/commons/zenoh-runtime/Cargo.toml +++ b/commons/zenoh-runtime/Cargo.toml @@ -16,5 +16,5 @@ description = { workspace = true } futures = { workspace = true } lazy_static = { workspace = true } zenoh-result = { workspace = true, features = ["std"] } -zenoh-collections = { workspace = true, features = ["std"] } +zenoh-protocol = { workspace = true } tokio = { workspace = true, features = ["fs", "io-util", "macros", "net", "rt-multi-thread", "sync", "time"] } diff --git a/commons/zenoh-runtime/src/lib.rs b/commons/zenoh-runtime/src/lib.rs index 492e0a6665..0baefd73ef 100644 --- a/commons/zenoh-runtime/src/lib.rs +++ b/commons/zenoh-runtime/src/lib.rs @@ -25,7 +25,7 @@ use std::{ time::Duration, }; use tokio::runtime::{Handle, Runtime, RuntimeFlavor}; -use zenoh_collections::Properties; +use zenoh_protocol::core::Properties; use zenoh_result::ZResult as Result; const ZENOH_RUNTIME_THREADS_ENV: &str = "ZENOH_RUNTIME_THREADS"; diff --git a/io/zenoh-links/zenoh-link-quic/src/lib.rs b/io/zenoh-links/zenoh-link-quic/src/lib.rs index 7f5e2a1587..0c4ae4937b 100644 --- a/io/zenoh-links/zenoh-link-quic/src/lib.rs +++ b/io/zenoh-links/zenoh-link-quic/src/lib.rs @@ -25,12 +25,11 @@ use config::{ }; use secrecy::ExposeSecret; use std::net::SocketAddr; -use zenoh_collections::Parameters; use zenoh_config::Config; use zenoh_core::zconfigurable; use zenoh_link_commons::{ConfigurationInspector, LocatorInspector}; use zenoh_protocol::{ - core::{endpoint::Address, Locator}, + 
core::{endpoint::Address, Locator, Parameters}, transport::BatchSize, }; use zenoh_result::{bail, zerror, ZResult}; diff --git a/io/zenoh-links/zenoh-link-tls/src/lib.rs b/io/zenoh-links/zenoh-link-tls/src/lib.rs index dae8227cad..f6a7968326 100644 --- a/io/zenoh-links/zenoh-link-tls/src/lib.rs +++ b/io/zenoh-links/zenoh-link-tls/src/lib.rs @@ -27,12 +27,11 @@ use config::{ use rustls_pki_types::ServerName; use secrecy::ExposeSecret; use std::{convert::TryFrom, net::SocketAddr}; -use zenoh_collections::Parameters; use zenoh_config::Config; use zenoh_core::zconfigurable; use zenoh_link_commons::{ConfigurationInspector, LocatorInspector}; use zenoh_protocol::{ - core::{endpoint::Address, Locator}, + core::{endpoint::Address, Locator, Parameters}, transport::BatchSize, }; use zenoh_result::{bail, zerror, ZResult}; diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs index 70d3d4dddc..61c891da33 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs +++ b/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs @@ -21,11 +21,10 @@ pub mod unicast; use async_trait::async_trait; pub use unicast::*; -use zenoh_collections::Parameters; use zenoh_config::Config; use zenoh_core::zconfigurable; use zenoh_link_commons::{ConfigurationInspector, LocatorInspector}; -use zenoh_protocol::core::Locator; +use zenoh_protocol::core::{Locator, Parameters}; use zenoh_result::ZResult; pub const UNIXPIPE_LOCATOR_PREFIX: &str = "unixpipe"; diff --git a/io/zenoh-transport/src/multicast/manager.rs b/io/zenoh-transport/src/multicast/manager.rs index a6f682edc9..421664e954 100644 --- a/io/zenoh-transport/src/multicast/manager.rs +++ b/io/zenoh-transport/src/multicast/manager.rs @@ -19,7 +19,6 @@ use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; use tokio::sync::Mutex; -use zenoh_collections::Parameters; #[cfg(feature = "transport_compression")] use zenoh_config::CompressionMulticastConf; #[cfg(feature = "shared-memory")] @@ -27,8 +26,10 @@ use zenoh_config::SharedMemoryConf; use zenoh_config::{Config, LinkTxConf}; use zenoh_core::zasynclock; use zenoh_link::*; -use zenoh_protocol::core::ZenohId; -use zenoh_protocol::transport::close; +use zenoh_protocol::{ + core::{Parameters, ZenohId}, + transport::close, +}; use zenoh_result::{bail, zerror, ZResult}; pub struct TransportManagerConfigMulticast { diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index 0fdce265f9..ab31376788 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -36,7 +36,6 @@ use std::{ time::Duration, }; use tokio::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; -use zenoh_collections::Parameters; #[cfg(feature = "transport_compression")] use zenoh_config::CompressionUnicastConf; #[cfg(feature = "shared-memory")] @@ -46,7 +45,7 @@ use zenoh_core::{zasynclock, zcondfeat}; use zenoh_crypto::PseudoRng; use zenoh_link::*; use zenoh_protocol::{ - core::ZenohId, + core::{Parameters, ZenohId}, transport::{close, TransportSn}, }; use zenoh_result::{bail, zerror, ZResult}; diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs index 3f9fed1e90..5280c7af3c 100644 --- a/zenoh/src/payload.rs +++ b/zenoh/src/payload.rs @@ -28,7 +28,7 @@ use zenoh_buffers::{ ZBufReader, ZSlice, }; use zenoh_codec::{RCodec, WCodec, Zenoh080}; -use zenoh_collections::Properties; +use zenoh_protocol::core::Properties; use zenoh_result::{ZError, ZResult}; #[cfg(feature = 
"shared-memory")] use zenoh_shm::SharedMemoryBuf; @@ -1371,7 +1371,7 @@ mod tests { use rand::Rng; use std::borrow::Cow; use zenoh_buffers::{ZBuf, ZSlice}; - use zenoh_collections::Properties; + use zenoh_protocol::core::Properties; const NUM: usize = 1_000; diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 1fc91fbc6a..893bfdb8a3 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -21,8 +21,10 @@ use std::{ ops::{Deref, DerefMut}, str::FromStr, }; -use zenoh_collections::Properties; -use zenoh_protocol::core::key_expr::{keyexpr, OwnedKeyExpr}; +use zenoh_protocol::core::{ + key_expr::{keyexpr, OwnedKeyExpr}, + Properties, +}; use zenoh_result::ZResult; use zenoh_util::time_range::TimeRange; From 786d4199dde1eb77ea5863c6e5605b26f7e06149 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Mon, 15 Apr 2024 16:31:30 +0200 Subject: [PATCH 226/357] Interest network message (#915) * Declare message can be Push/Request/RequestContinuous/Response * Address review comments * Remove F: Future flag from DeclareInterest * cargo fmt --all * Remove unused Interest flags field * Update doc * Remove unneeded interest_id field * Update commons/zenoh-protocol/src/network/declare.rs * Remove unused UndeclareInterest * Add new Interest network message * Update doc * Update codec * Minor rework on interest message * Fix range in declare rand * Fix codec tests * Merge protocol_changes --------- Co-authored-by: Luca Cominardi --- commons/zenoh-codec/src/network/declare.rs | 129 +----- commons/zenoh-codec/src/network/interest.rs | 186 +++++++++ commons/zenoh-codec/src/network/mod.rs | 3 + commons/zenoh-protocol/src/network/declare.rs | 379 +---------------- .../zenoh-protocol/src/network/interest.rs | 383 ++++++++++++++++++ commons/zenoh-protocol/src/network/mod.rs | 13 +- io/zenoh-transport/src/shm.rs | 10 +- zenoh/src/key_expr.rs | 4 +- zenoh/src/net/primitives/demux.rs | 1 + zenoh/src/net/primitives/mod.rs | 8 +- zenoh/src/net/primitives/mux.rs | 55 ++- zenoh/src/net/routing/dispatcher/face.rs | 9 +- zenoh/src/net/routing/dispatcher/resource.rs | 4 +- zenoh/src/net/routing/hat/client/pubsub.rs | 10 +- zenoh/src/net/routing/hat/client/queries.rs | 8 +- .../net/routing/hat/linkstate_peer/pubsub.rs | 14 +- .../net/routing/hat/linkstate_peer/queries.rs | 14 +- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 10 +- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 8 +- zenoh/src/net/routing/hat/router/pubsub.rs | 22 +- zenoh/src/net/routing/hat/router/queries.rs | 22 +- zenoh/src/net/routing/mod.rs | 2 +- zenoh/src/net/runtime/adminspace.rs | 13 +- zenoh/src/net/tests/tables.rs | 14 +- zenoh/src/session.rs | 20 +- 25 files changed, 771 insertions(+), 570 deletions(-) create mode 100644 commons/zenoh-codec/src/network/interest.rs create mode 100644 commons/zenoh-protocol/src/network/interest.rs diff --git a/commons/zenoh-codec/src/network/declare.rs b/commons/zenoh-codec/src/network/declare.rs index 6e9dad12ce..ed3d019950 100644 --- a/commons/zenoh-codec/src/network/declare.rs +++ b/commons/zenoh-codec/src/network/declare.rs @@ -19,17 +19,10 @@ use zenoh_buffers::{ ZBuf, }; use zenoh_protocol::{ - common::{ - iext, - imsg::{self, HEADER_BITS}, - ZExtZ64, - }, + common::{iext, imsg, ZExtZ64}, core::{ExprId, ExprLen, WireExpr}, network::{ - declare::{ - self, common, interest, keyexpr, queryable, subscriber, token, Declare, DeclareBody, - DeclareMode, Interest, - }, + declare::{self, common, keyexpr, queryable, subscriber, token, Declare, DeclareBody}, id, Mapping, }, }; @@ -51,7 +44,6 @@ where 
DeclareBody::UndeclareQueryable(r) => self.write(&mut *writer, r)?, DeclareBody::DeclareToken(r) => self.write(&mut *writer, r)?, DeclareBody::UndeclareToken(r) => self.write(&mut *writer, r)?, - DeclareBody::DeclareInterest(r) => self.write(&mut *writer, r)?, DeclareBody::DeclareFinal(r) => self.write(&mut *writer, r)?, } @@ -79,7 +71,6 @@ where U_QUERYABLE => DeclareBody::UndeclareQueryable(codec.read(&mut *reader)?), D_TOKEN => DeclareBody::DeclareToken(codec.read(&mut *reader)?), U_TOKEN => DeclareBody::UndeclareToken(codec.read(&mut *reader)?), - D_INTEREST => DeclareBody::DeclareInterest(codec.read(&mut *reader)?), D_FINAL => DeclareBody::DeclareFinal(codec.read(&mut *reader)?), _ => return Err(DidntRead), }; @@ -97,7 +88,7 @@ where fn write(self, writer: &mut W, x: &Declare) -> Self::Output { let Declare { - mode, + interest_id, ext_qos, ext_tstamp, ext_nodeid, @@ -106,13 +97,9 @@ where // Header let mut header = id::DECLARE; - header |= match mode { - DeclareMode::Push => 0b00, - DeclareMode::Response(_) => 0b01, - DeclareMode::Request(_) => 0b10, - DeclareMode::RequestContinuous(_) => 0b11, - } << HEADER_BITS; - + if x.interest_id.is_some() { + header |= declare::flag::I; + } let mut n_exts = ((ext_qos != &declare::ext::QoSType::DEFAULT) as u8) + (ext_tstamp.is_some() as u8) + ((ext_nodeid != &declare::ext::NodeIdType::DEFAULT) as u8); @@ -121,12 +108,8 @@ where } self.write(&mut *writer, header)?; - // Body - if let DeclareMode::Request(rid) - | DeclareMode::RequestContinuous(rid) - | DeclareMode::Response(rid) = mode - { - self.write(&mut *writer, rid)?; + if let Some(interest_id) = interest_id { + self.write(&mut *writer, interest_id)?; } // Extensions @@ -175,14 +158,10 @@ where return Err(DidntRead); } - // Body - let mode = match (self.header >> HEADER_BITS) & 0b11 { - 0b00 => DeclareMode::Push, - 0b01 => DeclareMode::Response(self.codec.read(&mut *reader)?), - 0b10 => DeclareMode::Request(self.codec.read(&mut *reader)?), - 0b11 => DeclareMode::RequestContinuous(self.codec.read(&mut *reader)?), - _ => return Err(DidntRead), - }; + let mut interest_id = None; + if imsg::has_flag(self.header, declare::flag::I) { + interest_id = Some(self.codec.read(&mut *reader)?); + } // Extensions let mut ext_qos = declare::ext::QoSType::DEFAULT; @@ -219,7 +198,7 @@ where let body: DeclareBody = self.codec.read(&mut *reader)?; Ok(Declare { - mode, + interest_id, ext_qos, ext_tstamp, ext_nodeid, @@ -938,7 +917,7 @@ where // Extensions let mut ext_wire_expr = common::ext::WireExprType::null(); - let mut has_ext = imsg::has_flag(self.header, interest::flag::Z); + let mut has_ext = imsg::has_flag(self.header, token::flag::Z); while has_ext { let ext: u8 = self.codec.read(&mut *reader)?; let eodec = Zenoh080Header::new(ext); @@ -958,86 +937,6 @@ where } } -// DeclareInterest -impl WCodec<&interest::DeclareInterest, &mut W> for Zenoh080 -where - W: Writer, -{ - type Output = Result<(), DidntWrite>; - - fn write(self, writer: &mut W, x: &interest::DeclareInterest) -> Self::Output { - let interest::DeclareInterest { - interest: _, - wire_expr, - } = x; - - // Header - let header = declare::id::D_INTEREST; - self.write(&mut *writer, header)?; - - // Body - self.write(&mut *writer, x.options())?; - if let Some(we) = wire_expr.as_ref() { - self.write(&mut *writer, we)?; - } - - Ok(()) - } -} - -impl RCodec for Zenoh080 -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - let header: u8 = self.read(&mut *reader)?; - let codec = Zenoh080Header::new(header); - 
codec.read(reader) - } -} - -impl RCodec for Zenoh080Header -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - if imsg::mid(self.header) != declare::id::D_INTEREST { - return Err(DidntRead); - } - - // Body - let options: u8 = self.codec.read(&mut *reader)?; - let interest = Interest::from(options); - - let mut wire_expr = None; - if interest.restricted() { - let ccond = Zenoh080Condition::new(interest.named()); - let mut we: WireExpr<'static> = ccond.read(&mut *reader)?; - we.mapping = if interest.mapping() { - Mapping::Sender - } else { - Mapping::Receiver - }; - wire_expr = Some(we); - } - - // Extensions - let has_ext = imsg::has_flag(self.header, token::flag::Z); - if has_ext { - extension::skip_all(reader, "DeclareInterest")?; - } - - Ok(interest::DeclareInterest { - interest, - wire_expr, - }) - } -} - // WARNING: this is a temporary extension used for undeclarations impl WCodec<(&common::ext::WireExprType, bool), &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/src/network/interest.rs b/commons/zenoh-codec/src/network/interest.rs new file mode 100644 index 0000000000..9d1e64de76 --- /dev/null +++ b/commons/zenoh-codec/src/network/interest.rs @@ -0,0 +1,186 @@ +// +// Copyright (c) 2022 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Condition, Zenoh080Header}; +use zenoh_buffers::{ + reader::{DidntRead, Reader}, + writer::{DidntWrite, Writer}, +}; +use zenoh_protocol::{ + common::{ + iext, + imsg::{self, HEADER_BITS}, + }, + core::WireExpr, + network::{ + declare, id, + interest::{self, Interest, InterestMode, InterestOptions}, + Mapping, + }, +}; + +// Interest +impl WCodec<&Interest, &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: &Interest) -> Self::Output { + let Interest { + id, + mode, + options: _, // Compute the options on-the-fly according to Interest fields + wire_expr, + ext_qos, + ext_tstamp, + ext_nodeid, + } = x; + + // Header + let mut header = id::INTEREST; + header |= match mode { + InterestMode::Final => 0b00, + InterestMode::Current => 0b01, + InterestMode::Future => 0b10, + InterestMode::CurrentFuture => 0b11, + } << HEADER_BITS; + let mut n_exts = ((ext_qos != &declare::ext::QoSType::DEFAULT) as u8) + + (ext_tstamp.is_some() as u8) + + ((ext_nodeid != &declare::ext::NodeIdType::DEFAULT) as u8); + if n_exts != 0 { + header |= declare::flag::Z; + } + self.write(&mut *writer, header)?; + + self.write(&mut *writer, id)?; + + if *mode != InterestMode::Final { + self.write(&mut *writer, x.options())?; + if let Some(we) = wire_expr.as_ref() { + self.write(&mut *writer, we)?; + } + } + + // Extensions + if ext_qos != &declare::ext::QoSType::DEFAULT { + n_exts -= 1; + self.write(&mut *writer, (*ext_qos, n_exts != 0))?; + } + if let Some(ts) = ext_tstamp.as_ref() { + n_exts -= 1; + self.write(&mut *writer, (ts, n_exts != 0))?; + } + if ext_nodeid != &declare::ext::NodeIdType::DEFAULT { + n_exts -= 1; + self.write(&mut *writer, (*ext_nodeid, n_exts != 0))?; + } + + Ok(()) + } +} + +impl RCodec for Zenoh080 
+where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let header: u8 = self.read(&mut *reader)?; + let codec = Zenoh080Header::new(header); + + codec.read(reader) + } +} + +impl RCodec for Zenoh080Header +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + if imsg::mid(self.header) != id::INTEREST { + return Err(DidntRead); + } + + let id = self.codec.read(&mut *reader)?; + let mode = match (self.header >> HEADER_BITS) & 0b11 { + 0b00 => InterestMode::Final, + 0b01 => InterestMode::Current, + 0b10 => InterestMode::Future, + 0b11 => InterestMode::CurrentFuture, + _ => return Err(DidntRead), + }; + + let mut options = InterestOptions::empty(); + let mut wire_expr = None; + if mode != InterestMode::Final { + let options_byte: u8 = self.codec.read(&mut *reader)?; + options = InterestOptions::from(options_byte); + if options.restricted() { + let ccond = Zenoh080Condition::new(options.named()); + let mut we: WireExpr<'static> = ccond.read(&mut *reader)?; + we.mapping = if options.mapping() { + Mapping::Sender + } else { + Mapping::Receiver + }; + wire_expr = Some(we); + } + } + + // Extensions + let mut ext_qos = declare::ext::QoSType::DEFAULT; + let mut ext_tstamp = None; + let mut ext_nodeid = declare::ext::NodeIdType::DEFAULT; + + let mut has_ext = imsg::has_flag(self.header, declare::flag::Z); + while has_ext { + let ext: u8 = self.codec.read(&mut *reader)?; + let eodec = Zenoh080Header::new(ext); + match iext::eid(ext) { + declare::ext::QoS::ID => { + let (q, ext): (interest::ext::QoSType, bool) = eodec.read(&mut *reader)?; + ext_qos = q; + has_ext = ext; + } + declare::ext::Timestamp::ID => { + let (t, ext): (interest::ext::TimestampType, bool) = + eodec.read(&mut *reader)?; + ext_tstamp = Some(t); + has_ext = ext; + } + declare::ext::NodeId::ID => { + let (nid, ext): (interest::ext::NodeIdType, bool) = eodec.read(&mut *reader)?; + ext_nodeid = nid; + has_ext = ext; + } + _ => { + has_ext = extension::skip(reader, "Declare", ext)?; + } + } + } + + Ok(Interest { + id, + mode, + options, + wire_expr, + ext_qos, + ext_tstamp, + ext_nodeid, + }) + } +} diff --git a/commons/zenoh-codec/src/network/mod.rs b/commons/zenoh-codec/src/network/mod.rs index 3a227cd42a..5ebdb17b8e 100644 --- a/commons/zenoh-codec/src/network/mod.rs +++ b/commons/zenoh-codec/src/network/mod.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // mod declare; +mod interest; mod oam; mod push; mod request; @@ -45,6 +46,7 @@ where NetworkBody::Request(b) => self.write(&mut *writer, b), NetworkBody::Response(b) => self.write(&mut *writer, b), NetworkBody::ResponseFinal(b) => self.write(&mut *writer, b), + NetworkBody::Interest(b) => self.write(&mut *writer, b), NetworkBody::Declare(b) => self.write(&mut *writer, b), NetworkBody::OAM(b) => self.write(&mut *writer, b), } @@ -89,6 +91,7 @@ where id::REQUEST => NetworkBody::Request(self.read(&mut *reader)?), id::RESPONSE => NetworkBody::Response(self.read(&mut *reader)?), id::RESPONSE_FINAL => NetworkBody::ResponseFinal(self.read(&mut *reader)?), + id::INTEREST => NetworkBody::Interest(self.read(&mut *reader)?), id::DECLARE => NetworkBody::Declare(self.read(&mut *reader)?), id::OAM => NetworkBody::OAM(self.read(&mut *reader)?), _ => return Err(DidntRead), diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index 31e8adcc6e..9a41f42e56 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs 
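// A minimal, self-contained sketch of the two header layouts handled by the codecs
// above: Declare now gates an optional interest_id behind a single I flag, while
// Interest packs its 2-bit mode next to the Z flag. id::INTEREST (0x19) comes from
// the surrounding code; the Declare id, the exact flag positions and HEADER_BITS == 5
// are illustrative assumptions here, not the crate's actual constants.
const ID_DECLARE: u8 = 0x1e; // assumed message id, for illustration only
const ID_INTEREST: u8 = 0x19;
const FLAG_I: u8 = 1 << 5; // assumed "interest_id present" bit
const FLAG_Z: u8 = 1 << 7; // "at least one extension follows"
const HEADER_BITS: u8 = 5; // assumed: the low 5 bits carry the message id

#[derive(Clone, Copy, Debug, PartialEq)]
enum Mode {
    Final,
    Current,
    Future,
    CurrentFuture,
}

fn declare_header(interest_id: Option<u32>, n_exts: u8) -> u8 {
    let mut header = ID_DECLARE;
    if interest_id.is_some() {
        header |= FLAG_I;
    }
    if n_exts != 0 {
        header |= FLAG_Z;
    }
    header
}

fn interest_header(mode: Mode, n_exts: u8) -> u8 {
    let mut header = ID_INTEREST;
    header |= (match mode {
        Mode::Final => 0b00u8,
        Mode::Current => 0b01,
        Mode::Future => 0b10,
        Mode::CurrentFuture => 0b11,
    }) << HEADER_BITS;
    if n_exts != 0 {
        header |= FLAG_Z;
    }
    header
}

fn interest_mode(header: u8) -> Mode {
    match (header >> HEADER_BITS) & 0b11 {
        0b00 => Mode::Final,
        0b01 => Mode::Current,
        0b10 => Mode::Future,
        _ => Mode::CurrentFuture,
    }
}

fn main() {
    // Round trip: the reader recovers the mode the writer packed.
    let header = interest_header(Mode::CurrentFuture, 1);
    assert_eq!(interest_mode(header), Mode::CurrentFuture);
    assert_ne!(declare_header(Some(42), 0) & FLAG_I, 0);
}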
@@ -19,8 +19,6 @@ use crate::{ }; use alloc::borrow::Cow; pub use common::*; -use core::sync::atomic::AtomicU32; -pub use interest::*; pub use keyexpr::*; pub use queryable::*; pub use subscriber::*; @@ -33,59 +31,24 @@ pub mod flag { } /// Flags: -/// - |: Mode The mode of the the declaration* -/// -/ +/// - I: Interest If I==1 then interest_id is present +/// - X: Reserved /// - Z: Extension If Z==1 then at least one extension is present /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ -/// |Z|Mod| DECLARE | +/// |Z|X|I| DECLARE | /// +-+-+-+---------+ -/// ~ rid:z32 ~ if Mode != Push +/// ~interest_id:z32~ if I==1 /// +---------------+ /// ~ [decl_exts] ~ if Z==1 /// +---------------+ /// ~ declaration ~ /// +---------------+ /// -/// *Mode of declaration: -/// - Mode 0b00: Push -/// - Mode 0b01: Response -/// - Mode 0b10: Request -/// - Mode 0b11: RequestContinuous - -/// The resolution of a RequestId -pub type DeclareRequestId = u32; -pub type AtomicDeclareRequestId = AtomicU32; - -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum DeclareMode { - Push, - Request(DeclareRequestId), - RequestContinuous(DeclareRequestId), - Response(DeclareRequestId), -} - -impl DeclareMode { - #[cfg(feature = "test")] - pub fn rand() -> Self { - use rand::Rng; - - let mut rng = rand::thread_rng(); - - match rng.gen_range(0..4) { - 0 => DeclareMode::Push, - 1 => DeclareMode::Request(rng.gen()), - 2 => DeclareMode::RequestContinuous(rng.gen()), - 3 => DeclareMode::Response(rng.gen()), - _ => unreachable!(), - } - } -} - #[derive(Debug, Clone, PartialEq, Eq)] pub struct Declare { - pub mode: DeclareMode, + pub interest_id: Option, pub ext_qos: ext::QoSType, pub ext_tstamp: Option, pub ext_nodeid: ext::NodeIdType, @@ -121,8 +84,6 @@ pub mod id { pub const D_TOKEN: u8 = 0x06; pub const U_TOKEN: u8 = 0x07; - pub const D_INTEREST: u8 = 0x08; - pub const D_FINAL: u8 = 0x1A; } @@ -136,7 +97,6 @@ pub enum DeclareBody { UndeclareQueryable(UndeclareQueryable), DeclareToken(DeclareToken), UndeclareToken(UndeclareToken), - DeclareInterest(DeclareInterest), DeclareFinal(DeclareFinal), } @@ -147,7 +107,7 @@ impl DeclareBody { let mut rng = rand::thread_rng(); - match rng.gen_range(0..10) { + match rng.gen_range(0..9) { 0 => DeclareBody::DeclareKeyExpr(DeclareKeyExpr::rand()), 1 => DeclareBody::UndeclareKeyExpr(UndeclareKeyExpr::rand()), 2 => DeclareBody::DeclareSubscriber(DeclareSubscriber::rand()), @@ -156,8 +116,7 @@ impl DeclareBody { 5 => DeclareBody::UndeclareQueryable(UndeclareQueryable::rand()), 6 => DeclareBody::DeclareToken(DeclareToken::rand()), 7 => DeclareBody::UndeclareToken(UndeclareToken::rand()), - 8 => DeclareBody::DeclareInterest(DeclareInterest::rand()), - 9 => DeclareBody::DeclareFinal(DeclareFinal::rand()), + 8 => DeclareBody::DeclareFinal(DeclareFinal::rand()), _ => unreachable!(), } } @@ -170,14 +129,16 @@ impl Declare { let mut rng = rand::thread_rng(); - let mode = DeclareMode::rand(); + let interest_id = rng + .gen_bool(0.5) + .then_some(rng.gen::()); let ext_qos = ext::QoSType::rand(); let ext_tstamp = rng.gen_bool(0.5).then(ext::TimestampType::rand); let ext_nodeid = ext::NodeIdType::rand(); let body = DeclareBody::rand(); Self { - mode, + interest_id, ext_qos, ext_tstamp, ext_nodeid, @@ -197,7 +158,7 @@ pub mod common { /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ - /// |Z|x|x| D_FINAL | + /// |Z|X|X| D_FINAL | /// +---------------+ /// ~ [final_exts] ~ if Z==1 /// +---------------+ @@ -714,319 +675,3 @@ pub mod token { } } } - -pub mod interest { - use core::{ - fmt::{self, Debug}, - 
ops::{Add, AddAssign, Sub, SubAssign}, - }; - - use super::*; - - pub type InterestId = u32; - - pub mod flag { - // pub const X: u8 = 1 << 5; // 0x20 Reserved - // pub const X: u8 = 1 << 6; // 0x40 Reserved - pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow - } - - /// # DeclareInterest message - /// - /// The DECLARE INTEREST message is sent to request the transmission of current and optionally future - /// declarations of a given kind matching a target keyexpr. E.g., a declare interest could be - /// sent to request the transmisison of all current subscriptions matching `a/*`. - /// - /// The behaviour of a DECLARE INTEREST depends on the DECLARE MODE in the DECLARE MESSAGE: - /// - Push: invalid - /// - Request: only current declarations - /// - RequestContinous: current and future declarations - /// - Response: invalid - /// - /// E.g., the [`DeclareInterest`] message flow is the following for a Request: - /// - /// ```text - /// A B - /// | DECL INTEREST | - /// |------------------>| -- Sent in Declare::Request. - /// | | This is a DeclareInterest e.g. for subscriber declarations. - /// | | - /// | DECL SUBSCRIBER | - /// |<------------------| -- Sent in Declare::Response - /// | DECL SUBSCRIBER | - /// |<------------------| -- Sent in Declare::Response - /// | DECL SUBSCRIBER | - /// |<------------------| -- Sent in Declare::Response - /// | | - /// | FINAL | - /// |<------------------| -- Sent in Declare::Response - /// ``` - /// - /// - /// And the [`DeclareInterest`] message flow is the following for a RequestContinuous: - /// - /// ```text - /// A B - /// | DECL INTEREST | - /// |------------------>| -- Sent in Declare::RequestContinuous. - /// | | This is a DeclareInterest e.g. for subscriber declarations/undeclarations. - /// | | - /// | DECL SUBSCRIBER | - /// |<------------------| -- Sent in Declare::Push - /// | DECL SUBSCRIBER | - /// |<------------------| -- Sent in Declare::Push - /// | DECL SUBSCRIBER | - /// |<------------------| -- Sent in Declare::Push - /// | | - /// | FINAL | - /// |<------------------| -- Sent in Declare::Response - /// | | - /// | DECL SUBSCRIBER | - /// |<------------------| -- Sent in Declare::Push. This is a new subscriber declaration. - /// | UNDECL SUBSCRIBER | - /// |<------------------| -- Sent in Declare::Push. This is a new subscriber undeclaration. - /// | | - /// | ... | - /// | | - /// | FINAL | - /// |------------------>| -- Sent in Declare::RequestContinuous. - /// | | This stops the transmission of subscriber declarations/undeclarations. - /// | | - /// ``` - /// - /// The DECLARE INTEREST message structure is defined as follows: - /// - /// ```text - /// Flags: - /// - X: Reserved - /// - X: Reserved - /// - Z: Extension If Z==1 then at least one extension is present - /// - /// 7 6 5 4 3 2 1 0 - /// +-+-+-+-+-+-+-+-+ - /// |Z|X|X| D_INT | - /// +---------------+ - /// |A|M|N|R|T|Q|S|K| (*) - /// +---------------+ - /// ~ key_scope:z16 ~ if R==1 - /// +---------------+ - /// ~ key_suffix ~ if R==1 && N==1 -- - /// +---------------+ - /// ~ [decl_exts] ~ if Z==1 - /// +---------------+ - /// - /// (*) - if K==1 then the interest refers to key expressions - /// - if S==1 then the interest refers to subscribers - /// - if Q==1 then the interest refers to queryables - /// - if T==1 then the interest refers to tokens - /// - if R==1 then the interest is restricted to the matching key expression, else it is for all key expressions. - /// - if N==1 then the key expr has name/suffix. 
If R==0 then N should be set to 0. - /// - if M==1 then key expr mapping is the one declared by the sender, else it is the one declared by the receiver. - /// If R==0 then M should be set to 0. - /// - if A==1 then the replies SHOULD be aggregated - /// ``` - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct DeclareInterest { - pub interest: Interest, - pub wire_expr: Option>, - } - - impl DeclareInterest { - pub fn options(&self) -> u8 { - let mut interest = self.interest; - if let Some(we) = self.wire_expr.as_ref() { - interest += Interest::RESTRICTED; - if we.has_suffix() { - interest += Interest::NAMED; - } - if let Mapping::Sender = we.mapping { - interest += Interest::MAPPING; - } - } - interest.options - } - - #[cfg(feature = "test")] - pub fn rand() -> Self { - use rand::Rng; - let mut rng = rand::thread_rng(); - - let wire_expr = rng.gen_bool(0.5).then_some(WireExpr::rand()); - let interest = Interest::rand(); - - Self { - wire_expr, - interest, - } - } - } - - #[derive(Clone, Copy)] - pub struct Interest { - options: u8, - } - - impl Interest { - // Flags - pub const KEYEXPRS: Interest = Interest::options(1); - pub const SUBSCRIBERS: Interest = Interest::options(1 << 1); - pub const QUERYABLES: Interest = Interest::options(1 << 2); - pub const TOKENS: Interest = Interest::options(1 << 3); - const RESTRICTED: Interest = Interest::options(1 << 4); - const NAMED: Interest = Interest::options(1 << 5); - const MAPPING: Interest = Interest::options(1 << 6); - pub const AGGREGATE: Interest = Interest::options(1 << 7); - pub const ALL: Interest = Interest::options( - Interest::KEYEXPRS.options - | Interest::SUBSCRIBERS.options - | Interest::QUERYABLES.options - | Interest::TOKENS.options, - ); - - const fn options(options: u8) -> Self { - Self { options } - } - - pub const fn empty() -> Self { - Self { options: 0 } - } - - pub const fn keyexprs(&self) -> bool { - imsg::has_flag(self.options, Self::KEYEXPRS.options) - } - - pub const fn subscribers(&self) -> bool { - imsg::has_flag(self.options, Self::SUBSCRIBERS.options) - } - - pub const fn queryables(&self) -> bool { - imsg::has_flag(self.options, Self::QUERYABLES.options) - } - - pub const fn tokens(&self) -> bool { - imsg::has_flag(self.options, Self::TOKENS.options) - } - - pub const fn restricted(&self) -> bool { - imsg::has_flag(self.options, Self::RESTRICTED.options) - } - - pub const fn named(&self) -> bool { - imsg::has_flag(self.options, Self::NAMED.options) - } - - pub const fn mapping(&self) -> bool { - imsg::has_flag(self.options, Self::MAPPING.options) - } - - pub const fn aggregate(&self) -> bool { - imsg::has_flag(self.options, Self::AGGREGATE.options) - } - - #[cfg(feature = "test")] - pub fn rand() -> Self { - use rand::Rng; - let mut rng = rand::thread_rng(); - - let mut s = Self::empty(); - if rng.gen_bool(0.5) { - s += Interest::KEYEXPRS; - } - if rng.gen_bool(0.5) { - s += Interest::SUBSCRIBERS; - } - if rng.gen_bool(0.5) { - s += Interest::TOKENS; - } - if rng.gen_bool(0.5) { - s += Interest::AGGREGATE; - } - s - } - } - - impl PartialEq for Interest { - fn eq(&self, other: &Self) -> bool { - self.keyexprs() == other.keyexprs() - && self.subscribers() == other.subscribers() - && self.queryables() == other.queryables() - && self.tokens() == other.tokens() - && self.aggregate() == other.aggregate() - } - } - - impl Debug for Interest { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "Interest {{ ")?; - if self.keyexprs() { - write!(f, "K:Y, ")?; - } else { - write!(f, "K:N, ")?; - } - if 
self.subscribers() { - write!(f, "S:Y, ")?; - } else { - write!(f, "S:N, ")?; - } - if self.queryables() { - write!(f, "Q:Y, ")?; - } else { - write!(f, "Q:N, ")?; - } - if self.tokens() { - write!(f, "T:Y, ")?; - } else { - write!(f, "T:N, ")?; - } - if self.aggregate() { - write!(f, "A:Y")?; - } else { - write!(f, "A:N")?; - } - write!(f, " }}")?; - Ok(()) - } - } - - impl Eq for Interest {} - - impl Add for Interest { - type Output = Self; - - #[allow(clippy::suspicious_arithmetic_impl)] // Allows to implement Add & Sub for Interest - fn add(self, rhs: Self) -> Self::Output { - Self { - options: self.options | rhs.options, - } - } - } - - impl AddAssign for Interest { - #[allow(clippy::suspicious_op_assign_impl)] // Allows to implement Add & Sub for Interest - fn add_assign(&mut self, rhs: Self) { - self.options |= rhs.options; - } - } - - impl Sub for Interest { - type Output = Self; - - fn sub(self, rhs: Self) -> Self::Output { - Self { - options: self.options & !rhs.options, - } - } - } - - impl SubAssign for Interest { - fn sub_assign(&mut self, rhs: Self) { - self.options &= !rhs.options; - } - } - - impl From for Interest { - fn from(options: u8) -> Self { - Self { options } - } - } -} diff --git a/commons/zenoh-protocol/src/network/interest.rs b/commons/zenoh-protocol/src/network/interest.rs new file mode 100644 index 0000000000..e7eb75787e --- /dev/null +++ b/commons/zenoh-protocol/src/network/interest.rs @@ -0,0 +1,383 @@ +// +// Copyright (c) 2022 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::{common::imsg, core::WireExpr, network::Mapping}; +use core::{ + fmt::{self, Debug}, + ops::{Add, AddAssign, Sub, SubAssign}, + sync::atomic::AtomicU32, +}; + +pub type InterestId = u32; + +pub mod flag { + pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow +} + +/// The INTEREST message is sent to request the transmission of current and optionally future +/// declarations of a given kind matching a target keyexpr. E.g., an interest could be +/// sent to request the transmisison of all current subscriptions matching `a/*`. +/// +/// The behaviour of a INTEREST depends on the INTEREST MODE. +/// +/// E.g., the message flow is the following for an [`Interest`] with mode `Current`: +/// +/// ```text +/// A B +/// | INTEREST | +/// |------------------>| -- Mode: Current +/// | | This is an Interest e.g. for subscriber declarations. +/// | | +/// | DECL SUBSCRIBER | +/// |<------------------| -- With interest_id field set +/// | DECL SUBSCRIBER | +/// |<------------------| -- With interest_id field set +/// | DECL SUBSCRIBER | +/// |<------------------| -- With interest_id field set +/// | | +/// | DECL FINAL | +/// |<------------------| -- With interest_id field set +/// | | +/// ``` +/// +/// And the message flow is the following for an [`Interest`] with mode `CurrentFuture`: +/// +/// ```text +/// A B +/// | INTEREST | +/// |------------------>| -- This is a DeclareInterest e.g. for subscriber declarations/undeclarations. 
+/// | | +/// | DECL SUBSCRIBER | +/// |<------------------| -- With interest_id field not set +/// | DECL SUBSCRIBER | +/// |<------------------| -- With interest_id field not set +/// | DECL SUBSCRIBER | +/// |<------------------| -- With interest_id field not set +/// | | +/// | DECL FINAL | +/// |<------------------| -- With interest_id field set +/// | | +/// | DECL SUBSCRIBER | +/// |<------------------| -- With interest_id field not set +/// | UNDECL SUBSCRIBER | +/// |<------------------| -- With interest_id field not set +/// | | +/// | ... | +/// | | +/// | INTEREST FINAL | +/// |------------------>| -- Mode: Final +/// | | This stops the transmission of subscriber declarations/undeclarations. +/// | | +/// +/// Flags: +/// - |: Mode The mode of the interest* +/// -/ +/// - Z: Extension If Z==1 then at least one extension is present +/// +/// 7 6 5 4 3 2 1 0 +/// +-+-+-+-+-+-+-+-+ +/// |Z|Mod|INTEREST | +/// +-+-+-+---------+ +/// ~ id:z32 ~ +/// +---------------+ +/// |A|M|N|R|T|Q|S|K| if Mod!=Final (*) +/// +---------------+ +/// ~ key_scope:z16 ~ if Mod!=Final && R==1 +/// +---------------+ +/// ~ key_suffix ~ if Mod!=Final && R==1 && N==1 -- +/// +---------------+ +/// ~ [int_exts] ~ if Z==1 +/// +---------------+ +/// +/// *Mode of declaration: +/// - Mode 0b00: Final +/// - Mode 0b01: Current +/// - Mode 0b10: Future +/// - Mode 0b11: CurrentFuture +/// +/// (*) - if K==1 then the interest refers to key expressions +/// - if S==1 then the interest refers to subscribers +/// - if Q==1 then the interest refers to queryables +/// - if T==1 then the interest refers to tokens +/// - if R==1 then the interest is restricted to the matching key expression, else it is for all key expressions. +/// - if N==1 then the key expr has name/suffix. If R==0 then N should be set to 0. +/// - if M==1 then key expr mapping is the one declared by the sender, else it is the one declared by the receiver. +/// If R==0 then M should be set to 0. 
+/// - if A==1 then the replies SHOULD be aggregated +/// ``` + +/// The resolution of a RequestId +pub type DeclareRequestId = u32; +pub type AtomicDeclareRequestId = AtomicU32; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum InterestMode { + Final, + Current, + Future, + CurrentFuture, +} + +impl InterestMode { + #[cfg(feature = "test")] + pub fn rand() -> Self { + use rand::Rng; + + let mut rng = rand::thread_rng(); + + match rng.gen_range(0..4) { + 0 => InterestMode::Final, + 1 => InterestMode::Current, + 2 => InterestMode::Future, + 3 => InterestMode::CurrentFuture, + _ => unreachable!(), + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Interest { + pub id: InterestId, + pub mode: InterestMode, + pub options: InterestOptions, + pub wire_expr: Option>, + pub ext_qos: ext::QoSType, + pub ext_tstamp: Option, + pub ext_nodeid: ext::NodeIdType, +} + +pub mod ext { + use crate::{ + common::{ZExtZ64, ZExtZBuf}, + zextz64, zextzbuf, + }; + + pub type QoS = zextz64!(0x1, false); + pub type QoSType = crate::network::ext::QoSType<{ QoS::ID }>; + + pub type Timestamp = zextzbuf!(0x2, false); + pub type TimestampType = crate::network::ext::TimestampType<{ Timestamp::ID }>; + + pub type NodeId = zextz64!(0x3, true); + pub type NodeIdType = crate::network::ext::NodeIdType<{ NodeId::ID }>; +} + +impl Interest { + pub fn options(&self) -> u8 { + let mut interest = self.options; + if let Some(we) = self.wire_expr.as_ref() { + interest += InterestOptions::RESTRICTED; + if we.has_suffix() { + interest += InterestOptions::NAMED; + } + if let Mapping::Sender = we.mapping { + interest += InterestOptions::MAPPING; + } + } + interest.options + } + + #[cfg(feature = "test")] + pub fn rand() -> Self { + use rand::Rng; + let mut rng = rand::thread_rng(); + + let id = rng.gen::(); + let mode = InterestMode::rand(); + let options = InterestOptions::rand(); + let wire_expr = rng.gen_bool(0.5).then_some(WireExpr::rand()); + let ext_qos = ext::QoSType::rand(); + let ext_tstamp = rng.gen_bool(0.5).then(ext::TimestampType::rand); + let ext_nodeid = ext::NodeIdType::rand(); + + Self { + id, + mode, + wire_expr, + options, + ext_qos, + ext_tstamp, + ext_nodeid, + } + } +} + +#[repr(transparent)] +#[derive(Clone, Copy)] +pub struct InterestOptions { + options: u8, +} + +impl InterestOptions { + // Flags + pub const KEYEXPRS: InterestOptions = InterestOptions::options(1); + pub const SUBSCRIBERS: InterestOptions = InterestOptions::options(1 << 1); + pub const QUERYABLES: InterestOptions = InterestOptions::options(1 << 2); + pub const TOKENS: InterestOptions = InterestOptions::options(1 << 3); + const RESTRICTED: InterestOptions = InterestOptions::options(1 << 4); + const NAMED: InterestOptions = InterestOptions::options(1 << 5); + const MAPPING: InterestOptions = InterestOptions::options(1 << 6); + pub const AGGREGATE: InterestOptions = InterestOptions::options(1 << 7); + pub const ALL: InterestOptions = InterestOptions::options( + InterestOptions::KEYEXPRS.options + | InterestOptions::SUBSCRIBERS.options + | InterestOptions::QUERYABLES.options + | InterestOptions::TOKENS.options, + ); + + const fn options(options: u8) -> Self { + Self { options } + } + + pub const fn empty() -> Self { + Self { options: 0 } + } + + pub const fn keyexprs(&self) -> bool { + imsg::has_flag(self.options, Self::KEYEXPRS.options) + } + + pub const fn subscribers(&self) -> bool { + imsg::has_flag(self.options, Self::SUBSCRIBERS.options) + } + + pub const fn queryables(&self) -> bool { + imsg::has_flag(self.options, 
Self::QUERYABLES.options) + } + + pub const fn tokens(&self) -> bool { + imsg::has_flag(self.options, Self::TOKENS.options) + } + + pub const fn restricted(&self) -> bool { + imsg::has_flag(self.options, Self::RESTRICTED.options) + } + + pub const fn named(&self) -> bool { + imsg::has_flag(self.options, Self::NAMED.options) + } + + pub const fn mapping(&self) -> bool { + imsg::has_flag(self.options, Self::MAPPING.options) + } + + pub const fn aggregate(&self) -> bool { + imsg::has_flag(self.options, Self::AGGREGATE.options) + } + + #[cfg(feature = "test")] + pub fn rand() -> Self { + use rand::Rng; + let mut rng = rand::thread_rng(); + + let mut s = Self::empty(); + if rng.gen_bool(0.5) { + s += InterestOptions::KEYEXPRS; + } + if rng.gen_bool(0.5) { + s += InterestOptions::SUBSCRIBERS; + } + if rng.gen_bool(0.5) { + s += InterestOptions::TOKENS; + } + if rng.gen_bool(0.5) { + s += InterestOptions::AGGREGATE; + } + s + } +} + +impl PartialEq for InterestOptions { + fn eq(&self, other: &Self) -> bool { + self.keyexprs() == other.keyexprs() + && self.subscribers() == other.subscribers() + && self.queryables() == other.queryables() + && self.tokens() == other.tokens() + && self.aggregate() == other.aggregate() + } +} + +impl Debug for InterestOptions { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Interest {{ ")?; + if self.keyexprs() { + write!(f, "K:Y, ")?; + } else { + write!(f, "K:N, ")?; + } + if self.subscribers() { + write!(f, "S:Y, ")?; + } else { + write!(f, "S:N, ")?; + } + if self.queryables() { + write!(f, "Q:Y, ")?; + } else { + write!(f, "Q:N, ")?; + } + if self.tokens() { + write!(f, "T:Y, ")?; + } else { + write!(f, "T:N, ")?; + } + if self.aggregate() { + write!(f, "A:Y")?; + } else { + write!(f, "A:N")?; + } + write!(f, " }}")?; + Ok(()) + } +} + +impl Eq for InterestOptions {} + +impl Add for InterestOptions { + type Output = Self; + + #[allow(clippy::suspicious_arithmetic_impl)] // Allows to implement Add & Sub for Interest + fn add(self, rhs: Self) -> Self::Output { + Self { + options: self.options | rhs.options, + } + } +} + +impl AddAssign for InterestOptions { + #[allow(clippy::suspicious_op_assign_impl)] // Allows to implement Add & Sub for Interest + fn add_assign(&mut self, rhs: Self) { + self.options |= rhs.options; + } +} + +impl Sub for InterestOptions { + type Output = Self; + + fn sub(self, rhs: Self) -> Self::Output { + Self { + options: self.options & !rhs.options, + } + } +} + +impl SubAssign for InterestOptions { + fn sub_assign(&mut self, rhs: Self) { + self.options &= !rhs.options; + } +} + +impl From for InterestOptions { + fn from(options: u8) -> Self { + Self { options } + } +} diff --git a/commons/zenoh-protocol/src/network/mod.rs b/commons/zenoh-protocol/src/network/mod.rs index e60388f425..5a0635c9e0 100644 --- a/commons/zenoh-protocol/src/network/mod.rs +++ b/commons/zenoh-protocol/src/network/mod.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // pub mod declare; +pub mod interest; pub mod oam; pub mod push; pub mod request; @@ -20,10 +21,10 @@ pub mod response; use core::fmt; pub use declare::{ - Declare, DeclareBody, DeclareFinal, DeclareInterest, DeclareKeyExpr, DeclareMode, - DeclareQueryable, DeclareSubscriber, DeclareToken, UndeclareKeyExpr, UndeclareQueryable, - UndeclareSubscriber, UndeclareToken, + Declare, DeclareBody, DeclareFinal, DeclareKeyExpr, DeclareQueryable, DeclareSubscriber, + DeclareToken, UndeclareKeyExpr, UndeclareQueryable, UndeclareSubscriber, UndeclareToken, }; +pub use interest::Interest; pub 
use oam::Oam; pub use push::Push; pub use request::{AtomicRequestId, Request, RequestId}; @@ -40,6 +41,7 @@ pub mod id { pub const REQUEST: u8 = 0x1c; pub const RESPONSE: u8 = 0x1b; pub const RESPONSE_FINAL: u8 = 0x1a; + pub const INTEREST: u8 = 0x19; } #[repr(u8)] @@ -73,6 +75,7 @@ pub enum NetworkBody { Request(Request), Response(Response), ResponseFinal(ResponseFinal), + Interest(Interest), Declare(Declare), OAM(Oam), } @@ -117,6 +120,7 @@ impl NetworkMessage { NetworkBody::Request(msg) => msg.ext_qos.is_express(), NetworkBody::Response(msg) => msg.ext_qos.is_express(), NetworkBody::ResponseFinal(msg) => msg.ext_qos.is_express(), + NetworkBody::Interest(msg) => msg.ext_qos.is_express(), NetworkBody::Declare(msg) => msg.ext_qos.is_express(), NetworkBody::OAM(msg) => msg.ext_qos.is_express(), } @@ -133,6 +137,7 @@ impl NetworkMessage { NetworkBody::Request(msg) => msg.ext_qos.get_congestion_control(), NetworkBody::Response(msg) => msg.ext_qos.get_congestion_control(), NetworkBody::ResponseFinal(msg) => msg.ext_qos.get_congestion_control(), + NetworkBody::Interest(msg) => msg.ext_qos.get_congestion_control(), NetworkBody::Declare(msg) => msg.ext_qos.get_congestion_control(), NetworkBody::OAM(msg) => msg.ext_qos.get_congestion_control(), }; @@ -147,6 +152,7 @@ impl NetworkMessage { NetworkBody::Request(msg) => msg.ext_qos.get_priority(), NetworkBody::Response(msg) => msg.ext_qos.get_priority(), NetworkBody::ResponseFinal(msg) => msg.ext_qos.get_priority(), + NetworkBody::Interest(msg) => msg.ext_qos.get_priority(), NetworkBody::Declare(msg) => msg.ext_qos.get_priority(), NetworkBody::OAM(msg) => msg.ext_qos.get_priority(), } @@ -162,6 +168,7 @@ impl fmt::Display for NetworkMessage { Request(_) => write!(f, "Request"), Response(_) => write!(f, "Response"), ResponseFinal(_) => write!(f, "ResponseFinal"), + Interest(_) => write!(f, "Interest"), Declare(_) => write!(f, "Declare"), } } diff --git a/io/zenoh-transport/src/shm.rs b/io/zenoh-transport/src/shm.rs index bf569d0345..09edde884e 100644 --- a/io/zenoh-transport/src/shm.rs +++ b/io/zenoh-transport/src/shm.rs @@ -145,7 +145,10 @@ pub fn map_zmsg_to_shminfo(msg: &mut NetworkMessage) -> ZResult { ResponseBody::Reply(b) => b.map_to_shminfo(), ResponseBody::Err(b) => b.map_to_shminfo(), }, - NetworkBody::ResponseFinal(_) | NetworkBody::Declare(_) | NetworkBody::OAM(_) => Ok(false), + NetworkBody::ResponseFinal(_) + | NetworkBody::Interest(_) + | NetworkBody::Declare(_) + | NetworkBody::OAM(_) => Ok(false), } } @@ -196,7 +199,10 @@ pub fn map_zmsg_to_shmbuf( ResponseBody::Reply(b) => b.map_to_shmbuf(shmr), ResponseBody::Err(b) => b.map_to_shmbuf(shmr), }, - NetworkBody::ResponseFinal(_) | NetworkBody::Declare(_) | NetworkBody::OAM(_) => Ok(false), + NetworkBody::ResponseFinal(_) + | NetworkBody::Interest(_) + | NetworkBody::Declare(_) + | NetworkBody::OAM(_) => Ok(false), } } diff --git a/zenoh/src/key_expr.rs b/zenoh/src/key_expr.rs index 1e8da2c3c9..d2bfb5bcfe 100644 --- a/zenoh/src/key_expr.rs +++ b/zenoh/src/key_expr.rs @@ -53,7 +53,7 @@ pub use zenoh_keyexpr::*; pub use zenoh_macros::{kedefine, keformat, kewrite}; use zenoh_protocol::{ core::{key_expr::canon::Canonizable, ExprId, WireExpr}, - network::{declare, DeclareBody, DeclareMode, Mapping, UndeclareKeyExpr}, + network::{declare, DeclareBody, Mapping, UndeclareKeyExpr}, }; use zenoh_result::ZResult; @@ -664,7 +664,7 @@ impl SyncResolve for KeyExprUndeclaration<'_> { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); 
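// A usage sketch for the InterestOptions set algebra introduced earlier in this
// patch (commons/zenoh-protocol/src/network/interest.rs): `+` unions option flags,
// `-` clears them, and equality compares only the K/S/Q/T/A bits. Illustration only;
// the import path follows the new module layout shown above.
fn interest_options_demo() {
    use zenoh_protocol::network::interest::InterestOptions;

    let mut opts = InterestOptions::KEYEXPRS + InterestOptions::SUBSCRIBERS;
    assert!(opts.keyexprs() && opts.subscribers());
    assert!(!opts.tokens());

    opts += InterestOptions::AGGREGATE;
    assert!(opts.aggregate());

    opts -= InterestOptions::SUBSCRIBERS;
    assert!(!opts.subscribers());
    assert_eq!(opts, InterestOptions::KEYEXPRS + InterestOptions::AGGREGATE);
}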
primitives.send_declare(zenoh_protocol::network::Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/primitives/demux.rs b/zenoh/src/net/primitives/demux.rs index d62e410c81..e58e01a1b5 100644 --- a/zenoh/src/net/primitives/demux.rs +++ b/zenoh/src/net/primitives/demux.rs @@ -67,6 +67,7 @@ impl TransportPeerEventHandler for DeMux { match msg.body { NetworkBody::Push(m) => self.face.send_push(m), NetworkBody::Declare(m) => self.face.send_declare(m), + NetworkBody::Interest(_) => todo!(), NetworkBody::Request(m) => self.face.send_request(m), NetworkBody::Response(m) => self.face.send_response(m), NetworkBody::ResponseFinal(m) => self.face.send_response_final(m), diff --git a/zenoh/src/net/primitives/mod.rs b/zenoh/src/net/primitives/mod.rs index fd85280be0..d3aa8097ca 100644 --- a/zenoh/src/net/primitives/mod.rs +++ b/zenoh/src/net/primitives/mod.rs @@ -18,11 +18,15 @@ use std::any::Any; pub use demux::*; pub use mux::*; -use zenoh_protocol::network::{Declare, Push, Request, Response, ResponseFinal}; +use zenoh_protocol::network::{ + interest::Interest, Declare, Push, Request, Response, ResponseFinal, +}; use super::routing::RoutingContext; pub trait Primitives: Send + Sync { + fn send_interest(&self, msg: Interest); + fn send_declare(&self, msg: Declare); fn send_push(&self, msg: Push); @@ -56,6 +60,8 @@ pub(crate) trait EPrimitives: Send + Sync { pub struct DummyPrimitives; impl Primitives for DummyPrimitives { + fn send_interest(&self, _msg: Interest) {} + fn send_declare(&self, _msg: Declare) {} fn send_push(&self, _msg: Push) {} diff --git a/zenoh/src/net/primitives/mux.rs b/zenoh/src/net/primitives/mux.rs index 5c473e8ad8..ccb2452f30 100644 --- a/zenoh/src/net/primitives/mux.rs +++ b/zenoh/src/net/primitives/mux.rs @@ -19,7 +19,8 @@ use crate::net::routing::{ }; use std::sync::OnceLock; use zenoh_protocol::network::{ - Declare, NetworkBody, NetworkMessage, Push, Request, Response, ResponseFinal, + interest::Interest, Declare, NetworkBody, NetworkMessage, Push, Request, Response, + ResponseFinal, }; use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; @@ -40,6 +41,34 @@ impl Mux { } impl Primitives for Mux { + fn send_interest(&self, msg: Interest) { + let msg = NetworkMessage { + body: NetworkBody::Interest(msg), + #[cfg(feature = "stats")] + size: None, + }; + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get() { + let Some(face) = face.upgrade() else { + log::debug!("Invalid face: {:?}. Interest not sent: {:?}", face, msg); + return; + }; + let ctx = RoutingContext::new_out(msg, face.clone()); + let prefix = ctx + .wire_expr() + .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) + .flatten() + .cloned(); + let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(&face)); + if let Some(ctx) = self.interceptor.intercept(ctx, cache) { + let _ = self.handler.schedule(ctx.msg); + } + } else { + log::debug!("Uninitialized multiplexer. 
Interest not sent: {:?}", msg); + } + } + fn send_declare(&self, msg: Declare) { let msg = NetworkMessage { body: NetworkBody::Declare(msg), @@ -316,6 +345,30 @@ impl McastMux { } impl Primitives for McastMux { + fn send_interest(&self, msg: Interest) { + let msg = NetworkMessage { + body: NetworkBody::Interest(msg), + #[cfg(feature = "stats")] + size: None, + }; + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get() { + let ctx = RoutingContext::new_out(msg, face.clone()); + let prefix = ctx + .wire_expr() + .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) + .flatten() + .cloned(); + let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(face)); + if let Some(ctx) = self.interceptor.intercept(ctx, cache) { + let _ = self.handler.schedule(ctx.msg); + } + } else { + log::error!("Uninitialized multiplexer!"); + } + } + fn send_declare(&self, msg: Declare) { let msg = NetworkMessage { body: NetworkBody::Declare(msg), diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index 371edee57b..29c3f0da2f 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -154,7 +154,7 @@ impl fmt::Display for FaceState { } } -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct WeakFace { pub(crate) tables: Weak, pub(crate) state: Weak, @@ -185,6 +185,10 @@ impl Face { } impl Primitives for Face { + fn send_interest(&self, _msg: zenoh_protocol::network::Interest) { + todo!() + } + fn send_declare(&self, msg: zenoh_protocol::network::Declare) { let ctrl_lock = zlock!(self.tables.ctrl_lock); match msg.body { @@ -238,8 +242,7 @@ impl Primitives for Face { } zenoh_protocol::network::DeclareBody::DeclareToken(_m) => todo!(), zenoh_protocol::network::DeclareBody::UndeclareToken(_m) => todo!(), - zenoh_protocol::network::DeclareBody::DeclareInterest(_m) => todo!(), - zenoh_protocol::network::DeclareBody::DeclareFinal(_m) => todo!(), + zenoh_protocol::network::DeclareBody::DeclareFinal(_) => todo!(), } drop(ctrl_lock); } diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 58a081d743..62193cdf93 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -27,7 +27,7 @@ use zenoh_protocol::{ network::{ declare::{ ext, queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, Declare, - DeclareBody, DeclareKeyExpr, DeclareMode, + DeclareBody, DeclareKeyExpr, }, Mapping, }, @@ -465,7 +465,7 @@ impl Resource { .insert(expr_id, nonwild_prefix.clone()); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 6c689d3336..e85bb77bf9 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -30,7 +30,7 @@ use zenoh_protocol::{ core::{Reliability, WhatAmI}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareMode, DeclareSubscriber, UndeclareSubscriber, + DeclareSubscriber, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; @@ -53,7 +53,7 @@ fn propagate_simple_subscription_to( let key_expr = Resource::decl_key(res, dst_face); 
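// A minimal sketch of the trait-extension pattern applied above: Primitives gains a
// send_interest entry point, and implementations that do not route interests yet
// satisfy it with a no-op (DummyPrimitives) or a todo!() placeholder (DeMux, Face).
// The trait and message types below are simplified stand-ins, not the real ones.
struct InterestMsg; // stand-in for zenoh_protocol::network::Interest
struct DeclareMsg; // stand-in for zenoh_protocol::network::Declare

trait MiniPrimitives: Send + Sync {
    fn send_interest(&self, msg: InterestMsg);
    fn send_declare(&self, msg: DeclareMsg);
}

struct NoOpPrimitives;

impl MiniPrimitives for NoOpPrimitives {
    fn send_interest(&self, _msg: InterestMsg) {} // silently dropped, like DummyPrimitives
    fn send_declare(&self, _msg: DeclareMsg) {}
}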
dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -137,7 +137,7 @@ fn declare_client_subscription( .primitives .send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -171,7 +171,7 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -206,7 +206,7 @@ pub(super) fn undeclare_client_subscription( if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 28e1d75460..5c0bc5349b 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -33,7 +33,7 @@ use zenoh_protocol::{ core::{WhatAmI, WireExpr}, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareMode, DeclareQueryable, UndeclareQueryable, + DeclareQueryable, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; @@ -93,7 +93,7 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -165,7 +165,7 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -418,7 +418,7 @@ pub(super) fn undeclare_client_subscription( if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -460,7 +460,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index 356793e3a3..150c12a632 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -36,7 +36,7 @@ use zenoh_protocol::{ core::{WhatAmI, WireExpr, ZenohId}, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareMode, DeclareQueryable, UndeclareQueryable, + DeclareQueryable, 
UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; @@ -126,7 +126,7 @@ fn send_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { @@ -170,7 +170,7 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -339,7 +339,7 @@ fn send_forget_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { @@ -365,7 +365,7 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index 5ac0b22846..b495248788 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -30,7 +30,7 @@ use zenoh_protocol::{ core::{Reliability, WhatAmI}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareMode, DeclareSubscriber, UndeclareSubscriber, + DeclareSubscriber, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; @@ -53,7 +53,7 @@ fn propagate_simple_subscription_to( let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -137,7 +137,7 @@ fn declare_client_subscription( .primitives .send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -171,7 +171,7 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -206,7 +206,7 @@ pub(super) fn undeclare_client_subscription( if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index c2d62c7658..72c32b9217 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -33,7 +33,7 @@ use zenoh_protocol::{ core::{WhatAmI, WireExpr}, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareMode, DeclareQueryable, UndeclareQueryable, + DeclareQueryable, 
UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; @@ -93,7 +93,7 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -165,7 +165,7 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -412,7 +412,7 @@ fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc< if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -564,7 +564,7 @@ pub(super) fn undeclare_client_subscription( if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -606,7 +606,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -635,7 +635,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -774,7 +774,7 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: if forget { dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -800,7 +800,7 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: }; dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index e647cf2dc7..99e787beb5 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -36,7 +36,7 @@ use zenoh_protocol::{ core::{WhatAmI, WireExpr, ZenohId}, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareMode, DeclareQueryable, UndeclareQueryable, + DeclareQueryable, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; @@ -194,7 +194,7 @@ fn send_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { @@ 
-248,7 +248,7 @@ fn propagate_simple_queryable( let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -473,7 +473,7 @@ fn send_forget_sourced_queryable_to_net_childs( someface.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { @@ -499,7 +499,7 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -775,7 +775,7 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -874,7 +874,7 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links if forget { dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -900,7 +900,7 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/net/routing/mod.rs b/zenoh/src/net/routing/mod.rs index 77f51c16b3..75b4d4ef6a 100644 --- a/zenoh/src/net/routing/mod.rs +++ b/zenoh/src/net/routing/mod.rs @@ -107,6 +107,7 @@ impl RoutingContext { NetworkBody::Request(m) => Some(&m.wire_expr), NetworkBody::Response(m) => Some(&m.wire_expr), NetworkBody::ResponseFinal(_) => None, + NetworkBody::Interest(m) => m.wire_expr.as_ref(), NetworkBody::Declare(m) => match &m.body { DeclareBody::DeclareKeyExpr(m) => Some(&m.wire_expr), DeclareBody::UndeclareKeyExpr(_) => None, @@ -116,7 +117,6 @@ impl RoutingContext { DeclareBody::UndeclareQueryable(m) => Some(&m.ext_wire_expr.wire_expr), DeclareBody::DeclareToken(m) => Some(&m.wire_expr), DeclareBody::UndeclareToken(m) => Some(&m.ext_wire_expr.wire_expr), - DeclareBody::DeclareInterest(m) => m.wire_expr.as_ref(), DeclareBody::DeclareFinal(_) => None, }, NetworkBody::OAM(_) => None, diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 5b5b41b390..78ece859c7 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -33,6 +33,7 @@ use zenoh_buffers::buffer::SplitBuffer; use zenoh_config::{ConfigValidator, ValidatedMap, WhatAmI}; use zenoh_plugin_trait::{PluginControl, PluginStatus}; use zenoh_protocol::network::declare::QueryableId; +use zenoh_protocol::network::Interest; use zenoh_protocol::{ core::{ key_expr::{keyexpr, OwnedKeyExpr}, @@ -40,8 +41,8 @@ use zenoh_protocol::{ }, network::{ declare::{queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo}, - ext, Declare, DeclareBody, DeclareMode, 
DeclareQueryable, DeclareSubscriber, Push, Request, - Response, ResponseFinal, + ext, Declare, DeclareBody, DeclareQueryable, DeclareSubscriber, Push, Request, Response, + ResponseFinal, }, zenoh::{PushBody, RequestBody}, }; @@ -277,7 +278,7 @@ impl AdminSpace { zlock!(admin.primitives).replace(primitives.clone()); primitives.send_declare(Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, @@ -290,7 +291,7 @@ impl AdminSpace { }); primitives.send_declare(Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -320,6 +321,10 @@ impl AdminSpace { } impl Primitives for AdminSpace { + fn send_interest(&self, msg: Interest) { + log::trace!("Recv interest {:?}", msg); + } + fn send_declare(&self, msg: Declare) { log::trace!("Recv declare {:?}", msg); if let DeclareBody::DeclareKeyExpr(m) = msg.body { diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index 35db2a7ac4..841bc209f6 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -26,7 +26,7 @@ use zenoh_protocol::core::{ key_expr::keyexpr, ExprId, Reliability, WhatAmI, WireExpr, ZenohId, EMPTY_EXPR_ID, }; use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; -use zenoh_protocol::network::{ext, Declare, DeclareBody, DeclareKeyExpr, DeclareMode}; +use zenoh_protocol::network::{ext, Declare, DeclareBody, DeclareKeyExpr}; use zenoh_protocol::zenoh::{PushBody, Put}; #[test] @@ -495,6 +495,8 @@ impl ClientPrimitives { } impl Primitives for ClientPrimitives { + fn send_interest(&self, _msg: zenoh_protocol::network::Interest) {} + fn send_declare(&self, msg: zenoh_protocol::network::Declare) { match msg.body { DeclareBody::DeclareKeyExpr(d) => { @@ -579,7 +581,7 @@ fn client_test() { Primitives::send_declare( primitives0.as_ref(), Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -607,7 +609,7 @@ fn client_test() { Primitives::send_declare( primitives0.as_ref(), Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -629,7 +631,7 @@ fn client_test() { Primitives::send_declare( primitives1.as_ref(), Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -657,7 +659,7 @@ fn client_test() { Primitives::send_declare( primitives1.as_ref(), Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -679,7 +681,7 @@ fn client_test() { Primitives::send_declare( primitives2.as_ref(), Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 3f1c382a66..29ad9c2b00 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -69,7 +69,7 @@ use zenoh_protocol::{ network::{ declare::{ self, common::ext::WireExprType, queryable::ext::QueryableInfoType, - subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, DeclareMode, + subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, DeclareQueryable, DeclareSubscriber, UndeclareQueryable, UndeclareSubscriber, }, request::{self, ext::TargetType, Request}, 
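// The change repeated across the routing and session hunks above, reduced to a
// sketch: locally originated declarations are sent with interest_id set to None
// (the old DeclareMode::Push), while a declaration emitted to answer a received
// Interest echoes that interest's id. MiniDeclare is a stand-in; the real Declare
// also carries QoS, timestamp and node-id extensions plus a DeclareBody.
struct MiniDeclare {
    interest_id: Option<u32>,
}

fn push_declare() -> MiniDeclare {
    MiniDeclare { interest_id: None }
}

fn answer_interest(id: u32) -> MiniDeclare {
    MiniDeclare {
        interest_id: Some(id),
    }
}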
@@ -893,7 +893,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1106,7 +1106,7 @@ impl Session { // }; primitives.send_declare(Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1163,7 +1163,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1215,7 +1215,7 @@ impl Session { distance: 0, }; primitives.send_declare(Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1237,7 +1237,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1273,7 +1273,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: declare::ext::NodeIdType::DEFAULT, @@ -1298,7 +1298,7 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - mode: DeclareMode::Push, + interest_id: None, ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::DEFAULT, @@ -1984,6 +1984,9 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { } impl Primitives for Session { + fn send_interest(&self, msg: zenoh_protocol::network::Interest) { + trace!("recv Interest {} {:?}", msg.id, msg.wire_expr); + } fn send_declare(&self, msg: zenoh_protocol::network::Declare) { match msg.body { zenoh_protocol::network::DeclareBody::DeclareKeyExpr(m) => { @@ -2086,7 +2089,6 @@ impl Primitives for Session { } DeclareBody::DeclareToken(_) => todo!(), DeclareBody::UndeclareToken(_) => todo!(), - DeclareBody::DeclareInterest(_) => todo!(), DeclareBody::DeclareFinal(_) => todo!(), } } From 64227479e237a91e3d82a3fd21a5e8b2367aa89e Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 16:42:27 +0200 Subject: [PATCH 227/357] Properties do not depend on Parameters --- commons/zenoh-protocol/src/core/properties.rs | 121 +++++++++++++----- zenoh/src/selector.rs | 2 +- 2 files changed, 92 insertions(+), 31 deletions(-) diff --git a/commons/zenoh-protocol/src/core/properties.rs b/commons/zenoh-protocol/src/core/properties.rs index d2f0506d46..88097debdc 100644 --- a/commons/zenoh-protocol/src/core/properties.rs +++ b/commons/zenoh-protocol/src/core/properties.rs @@ -11,12 +11,25 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{Parameters, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR}; use alloc::borrow::Cow; use core::{borrow::Borrow, fmt}; #[cfg(feature = "std")] use std::collections::HashMap; +const LIST_SEPARATOR: char = ';'; +const FIELD_SEPARATOR: char = '='; +const VALUE_SEPARATOR: char = '|'; + +fn split_once(s: &str, c: char) -> 
(&str, &str) { + match s.find(c) { + Some(index) => { + let (l, r) = s.split_at(index); + (l, &r[1..]) + } + None => (s, ""), + } +} + /// A map of key/value (String,String) properties. /// It can be parsed from a String, using `;` or `` as separator between each properties /// and `=` as separator between a key and its value. Keys and values are trimed. @@ -71,25 +84,37 @@ impl Properties<'_> { self.get(k).is_some() } - /// Returns a reference to the value corresponding to the key. + /// Returns a reference to the `&str`-value corresponding to the key. pub fn get(&self, k: K) -> Option<&str> where K: Borrow, { - Parameters::get(self.as_str(), k.borrow()) + self.iter() + .find(|(key, _)| *key == k.borrow()) + .map(|(_, value)| value) } - /// Returns an iterator to the values corresponding to the key. + /// Returns an iterator to the `&str`-values corresponding to the key. pub fn values(&self, k: K) -> impl DoubleEndedIterator where K: Borrow, { - Parameters::values(self.as_str(), k.borrow()) + match self.get(k) { + Some(v) => v.split(VALUE_SEPARATOR), + None => { + let mut i = "".split(VALUE_SEPARATOR); + i.next(); + i + } + } } /// Returns an iterator on the key-value pairs as `(&str, &str)`. pub fn iter(&self) -> impl DoubleEndedIterator + Clone { - Parameters::iter(self.as_str()) + self.as_str() + .split(LIST_SEPARATOR) + .filter(|p| !p.is_empty()) + .map(|p| split_once(p, FIELD_SEPARATOR)) } /// Inserts a key-value pair into the map. @@ -100,10 +125,17 @@ impl Properties<'_> { K: Borrow, V: Borrow, { - let (inner, removed) = Parameters::insert(self.iter(), k.borrow(), v.borrow()); - let removed = removed.map(|s| s.to_string()); - self.0 = Cow::Owned(inner); - removed + let item = self + .iter() + .find(|(key, _)| *key == k.borrow()) + .map(|(_, v)| v.to_string()); + + let current = self.iter().filter(|x| x.0 != k.borrow()); + let new = Some((k.borrow(), v.borrow())).into_iter(); + let iter = current.chain(new); + + *self = Self::from_iter(iter); + item } /// Removes a key from the map, returning the value at the key if the key was previously in the properties. @@ -111,23 +143,35 @@ impl Properties<'_> { where K: Borrow, { - let (inner, removed) = Parameters::remove(self.iter(), k.borrow()); - let removed = removed.map(|s| s.to_string()); - self.0 = Cow::Owned(inner); - removed + let item = self + .iter() + .find(|(key, _)| *key == k.borrow()) + .map(|(_, v)| v.to_string()); + let iter = self.iter().filter(|x| x.0 != k.borrow()); + + *self = Self::from_iter(iter); + item } - /// Join an iterator of key-value pairs `(&str, &str)` into properties. - pub fn join<'s, I, K, V>(&mut self, iter: I) + /// Extend these properties with other properties. + pub fn extend(&mut self, other: &Properties) { + self.extend_from_iter(other.iter()); + } + + /// Extend these properties from an iterator. + pub fn extend_from_iter<'s, I, K, V>(&mut self, iter: I) where I: Iterator + Clone, K: Borrow + 's + ?Sized, V: Borrow + 's + ?Sized, { - self.0 = Cow::Owned(Parameters::join( - Parameters::iter(self.as_str()), - iter.map(|(k, v)| (k.borrow(), v.borrow())), - )); + let new: I = iter.clone(); + let current = self + .iter() + .filter(|(kc, _)| !new.clone().any(|(kn, _)| *kc == kn.borrow())); + let iter = current.chain(iter.map(|(k, v)| (k.borrow(), v.borrow()))); + + *self = Self::from_iter(iter); } /// Convert these properties into owned properties. 
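// Illustrative sketch (not part of the patch): minimal usage of the Properties API as it
// stands after this commit, written against the signatures and the doc example shown in
// the hunks above. The `zenoh_protocol::core::Properties` path and the test harness are
// assumptions.
use zenoh_protocol::core::Properties;

#[test]
fn properties_usage_sketch() {
    // Read-only access over a parameters-formatted string.
    let p = Properties::from("a=1;b=2;c=3|4|5");
    assert_eq!(p.get("a"), Some("1"));
    assert_eq!(p.values("c").collect::<Vec<&str>>(), ["3", "4", "5"]);

    // Mutating calls return the previous value held by the key, if any.
    let mut q = Properties::from_iter(vec![("a", "1"), ("b", "2")]);
    assert_eq!(q.insert("a", "10"), Some("1".to_string()));
    assert_eq!(q.remove("b"), Some("2".to_string()));
    assert!(!q.contains_key("b"));
}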
@@ -170,7 +214,29 @@ where V: Borrow + 's + ?Sized, { fn from_iter>(iter: T) -> Self { - let inner = Parameters::from_iter(iter.into_iter().map(|(k, v)| (k.borrow(), v.borrow()))); + fn concat<'s, I>(iter: I) -> String + where + I: Iterator, + { + let mut into = String::new(); + let mut first = true; + for (k, v) in iter.filter(|(k, _)| !k.is_empty()) { + if !first { + into.push(LIST_SEPARATOR); + } + into.push_str(k); + if !v.is_empty() { + into.push(FIELD_SEPARATOR); + into.push_str(v); + } + first = false; + } + into + } + + let iter = iter.into_iter(); + let inner = concat(iter.map(|(k, v)| (k.borrow(), v.borrow()))); + Self(Cow::Owned(inner)) } } @@ -181,8 +247,7 @@ where V: Borrow + 's, { fn from_iter>(iter: T) -> Self { - let inner = Parameters::from_iter(iter.into_iter().map(|(k, v)| (k.borrow(), v.borrow()))); - Self(Cow::Owned(inner)) + Self::from_iter(iter.into_iter().map(|(k, v)| (k.borrow(), v.borrow()))) } } @@ -210,25 +275,21 @@ where #[cfg(feature = "std")] impl<'s> From<&'s Properties<'s>> for HashMap<&'s str, &'s str> { fn from(props: &'s Properties<'s>) -> Self { - HashMap::from_iter(Parameters::iter(props.as_str())) + HashMap::from_iter(props.iter()) } } #[cfg(feature = "std")] impl From<&Properties<'_>> for HashMap { fn from(props: &Properties<'_>) -> Self { - HashMap::from_iter( - Parameters::iter(props.as_str()).map(|(k, v)| (k.to_string(), v.to_string())), - ) + HashMap::from_iter(props.iter().map(|(k, v)| (k.to_string(), v.to_string()))) } } #[cfg(feature = "std")] impl<'s> From<&'s Properties<'s>> for HashMap, Cow<'s, str>> { fn from(props: &'s Properties<'s>) -> Self { - HashMap::from_iter( - Parameters::iter(props.as_str()).map(|(k, v)| (Cow::from(k), Cow::from(v))), - ) + HashMap::from_iter(props.iter().map(|(k, v)| (Cow::from(k), Cow::from(v)))) } } diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 893bfdb8a3..3e367c6864 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -388,7 +388,7 @@ fn selector_accessors() { let hm: HashMap = HashMap::from(selector.parameters()); assert!(hm.contains_key(TIME_RANGE_KEY)); - selector.parameters_mut().join(hm.iter()); + selector.parameters_mut().extend_from_iter(hm.iter()); assert_eq!(selector.parameters().get("_filter").unwrap(), ""); selector.set_accept_any_keyexpr(true); From 9a8c2102f2e5a595f0c41cc2c08711013f17d7e9 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 17:00:46 +0200 Subject: [PATCH 228/357] Remove #[non_exhaustive] for Properties --- commons/zenoh-protocol/src/core/properties.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/commons/zenoh-protocol/src/core/properties.rs b/commons/zenoh-protocol/src/core/properties.rs index 88097debdc..bd00507509 100644 --- a/commons/zenoh-protocol/src/core/properties.rs +++ b/commons/zenoh-protocol/src/core/properties.rs @@ -61,7 +61,6 @@ fn split_once(s: &str, c: char) -> (&str, &str) { /// let pi = Properties::from_iter(vec![("a", "1"), ("b", "2"), ("c", "3|4|5"), ("d", "6")]); /// assert_eq!(p, pi); /// ``` -#[non_exhaustive] #[derive(Clone, PartialEq, Eq, Default)] pub struct Properties<'s>(Cow<'s, str>); From fef7148293cc0992ffa69ae9cfc8a3b3e598fbed Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 18:02:51 +0200 Subject: [PATCH 229/357] Add OrderedProperties --- commons/zenoh-protocol/src/core/properties.rs | 213 ++++++++++++++++++ 1 file changed, 213 insertions(+) diff --git a/commons/zenoh-protocol/src/core/properties.rs b/commons/zenoh-protocol/src/core/properties.rs index bd00507509..2e323925fc 
100644 --- a/commons/zenoh-protocol/src/core/properties.rs +++ b/commons/zenoh-protocol/src/core/properties.rs @@ -177,6 +177,18 @@ impl Properties<'_> { pub fn into_owned(self) -> Properties<'static> { Properties(Cow::Owned(self.0.into_owned())) } + + /// Returns true if all keys are sorted in alphabetical order. + pub fn is_ordered(&self) -> bool { + let mut prev = None; + for (k, _) in self.iter() { + match prev.take() { + Some(p) if k < p => return false, + _ => prev = Some(k), + } + } + true + } } impl<'s> From<&'s str> for Properties<'s> { @@ -311,6 +323,207 @@ impl fmt::Debug for Properties<'_> { } } +#[derive(Clone, PartialEq, Eq, Default)] +pub struct OrderedProperties<'s>(Properties<'s>); + +impl OrderedProperties<'_> { + /// Returns `true` if properties does not contain anything. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Returns properties as [`str`]. + pub fn as_str(&self) -> &str { + self.0.as_str() + } + + /// Returns `true` if properties contains the specified key. + pub fn contains_key(&self, k: K) -> bool + where + K: Borrow, + { + self.0.contains_key(k) + } + + /// Returns a reference to the `&str`-value corresponding to the key. + pub fn get(&self, k: K) -> Option<&str> + where + K: Borrow, + { + self.0.get(k) + } + + /// Returns an iterator to the `&str`-values corresponding to the key. + pub fn values(&self, k: K) -> impl DoubleEndedIterator + where + K: Borrow, + { + self.0.values(k) + } + + /// Returns an iterator on the key-value pairs as `(&str, &str)`. + pub fn iter(&self) -> impl DoubleEndedIterator + Clone { + self.0.iter() + } + + /// Removes a key from the map, returning the value at the key if the key was previously in the properties. + pub fn remove(&mut self, k: K) -> Option + where + K: Borrow, + { + self.0.remove(k) + } + + /// Inserts a key-value pair into the map. + /// If the map did not have this key present, [`None`]` is returned. + /// If the map did have this key present, the value is updated, and the old value is returned. + pub fn insert(&mut self, k: K, v: V) -> Option + where + K: Borrow, + V: Borrow, + { + let item = self.0.insert(k, v); + self.order(); + item + } + + /// Extend these properties with other properties. + pub fn extend(&mut self, other: &Properties) { + self.extend_from_iter(other.iter()); + } + + /// Extend these properties from an iterator. + pub fn extend_from_iter<'s, I, K, V>(&mut self, iter: I) + where + I: Iterator + Clone, + K: Borrow + 's + ?Sized, + V: Borrow + 's + ?Sized, + { + self.0.extend_from_iter(iter); + self.order(); + } + + /// Convert these properties into owned properties. 
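// Illustrative sketch (not part of the patch): the guarantee OrderedProperties adds on top
// of Properties in this commit, namely that keys are rendered in alphabetical order no
// matter how they arrive. The import paths and the test harness are assumptions; the
// constructors and methods are the ones introduced in the hunks around this point.
use zenoh_protocol::core::{OrderedProperties, Properties};

#[test]
fn ordered_properties_sketch() {
    let p = Properties::from("c=3;a=1;b=2");
    assert!(!p.is_ordered()); // plain Properties keep the original order

    let o = OrderedProperties::from("c=3;a=1;b=2");
    assert_eq!(o.as_str(), "a=1;b=2;c=3"); // re-rendered with sorted keys
}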
+ pub fn into_owned(self) -> OrderedProperties<'static> { + OrderedProperties(self.0.into_owned()) + } + + fn order(&mut self) { + if !self.0.is_ordered() { + let mut from = self.0.iter().collect::>(); + from.sort_unstable_by(|(k1, _), (k2, _)| k1.cmp(k2)); + self.0 = Properties::from_iter(from); + } + } +} + +impl<'s> From> for OrderedProperties<'s> { + fn from(value: Properties<'s>) -> Self { + let mut props = Self(value); + props.order(); + props + } +} + +impl<'s> From<&'s str> for OrderedProperties<'s> { + fn from(value: &'s str) -> Self { + Self::from(Properties::from(value)) + } +} + +impl From for OrderedProperties<'_> { + fn from(value: String) -> Self { + Self::from(Properties::from(value)) + } +} + +impl<'s> From> for OrderedProperties<'s> { + fn from(value: Cow<'s, str>) -> Self { + Self::from(Properties::from(value)) + } +} + +impl<'s, K, V> FromIterator<(&'s K, &'s V)> for OrderedProperties<'_> +where + K: Borrow + 's + ?Sized, + V: Borrow + 's + ?Sized, +{ + fn from_iter>(iter: T) -> Self { + Self::from(Properties::from_iter(iter)) + } +} + +impl<'s, K, V> FromIterator<&'s (K, V)> for OrderedProperties<'_> +where + K: Borrow + 's, + V: Borrow + 's, +{ + fn from_iter>(iter: T) -> Self { + Self::from(Properties::from_iter(iter)) + } +} + +impl<'s, K, V> From<&'s [(K, V)]> for OrderedProperties<'_> +where + K: Borrow + 's, + V: Borrow + 's, +{ + fn from(value: &'s [(K, V)]) -> Self { + Self::from_iter(value.iter()) + } +} + +#[cfg(feature = "std")] +impl From> for OrderedProperties<'_> +where + K: Borrow, + V: Borrow, +{ + fn from(map: HashMap) -> Self { + Self::from_iter(map.iter()) + } +} + +#[cfg(feature = "std")] +impl<'s> From<&'s OrderedProperties<'s>> for HashMap<&'s str, &'s str> { + fn from(props: &'s OrderedProperties<'s>) -> Self { + HashMap::from(&props.0) + } +} + +#[cfg(feature = "std")] +impl From<&OrderedProperties<'_>> for HashMap { + fn from(props: &OrderedProperties<'_>) -> Self { + HashMap::from(&props.0) + } +} + +#[cfg(feature = "std")] +impl<'s> From<&'s OrderedProperties<'s>> for HashMap, Cow<'s, str>> { + fn from(props: &'s OrderedProperties<'s>) -> Self { + HashMap::from(&props.0) + } +} + +#[cfg(feature = "std")] +impl From> for HashMap { + fn from(props: OrderedProperties) -> Self { + HashMap::from(&props) + } +} + +impl fmt::Display for OrderedProperties<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl fmt::Debug for OrderedProperties<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self) + } +} + #[cfg(test)] mod tests { use super::*; From 587a7cdbed170d5a1116eaf0a52f2a49da31fd08 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 18:29:02 +0200 Subject: [PATCH 230/357] Revert Properties depend on Parameters --- commons/zenoh-protocol/src/core/mod.rs | 1 - commons/zenoh-protocol/src/core/parameters.rs | 68 ++++---- commons/zenoh-protocol/src/core/properties.rs | 145 +++++------------- 3 files changed, 72 insertions(+), 142 deletions(-) diff --git a/commons/zenoh-protocol/src/core/mod.rs b/commons/zenoh-protocol/src/core/mod.rs index 0920d55d01..1652d6bdad 100644 --- a/commons/zenoh-protocol/src/core/mod.rs +++ b/commons/zenoh-protocol/src/core/mod.rs @@ -54,7 +54,6 @@ pub mod resolution; pub use resolution::*; pub mod parameters; -pub use parameters::*; pub mod properties; pub use properties::*; diff --git a/commons/zenoh-protocol/src/core/parameters.rs b/commons/zenoh-protocol/src/core/parameters.rs index b49ee1a1f9..59aaa54e28 100644 --- 
a/commons/zenoh-protocol/src/core/parameters.rs +++ b/commons/zenoh-protocol/src/core/parameters.rs @@ -11,9 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -pub const LIST_SEPARATOR: char = ';'; -pub const FIELD_SEPARATOR: char = '='; -pub const VALUE_SEPARATOR: char = '|'; +pub(super) const LIST_SEPARATOR: char = ';'; +pub(super) const FIELD_SEPARATOR: char = '='; +pub(super) const VALUE_SEPARATOR: char = '|'; use alloc::{string::String, vec::Vec}; @@ -29,11 +29,11 @@ fn split_once(s: &str, c: char) -> (&str, &str) { /// Parameters provides an `HashMap<&str, &str>`-like view over a `&str` when `&str` follows the format `a=b;c=d|e;f=g`. /// [`SortedParameters`] it's like [`Parameters`] but with the guarantee that keys are sorted upon insertion. -pub struct SortedParameters; +pub(super) struct SortedParameters; impl SortedParameters { #[allow(clippy::should_implement_trait)] - pub fn from_iter<'s, I>(iter: I) -> String + pub(super) fn from_iter<'s, I>(iter: I) -> String where I: Iterator, { @@ -42,7 +42,7 @@ impl SortedParameters { into } - pub fn from_iter_into<'s, I>(iter: I, into: &mut String) + pub(super) fn from_iter_into<'s, I>(iter: I, into: &mut String) where I: Iterator, { @@ -51,7 +51,7 @@ impl SortedParameters { Parameters::from_iter_into(from.iter().copied(), into); } - pub fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) + pub(super) fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) where I: Iterator + Clone, { @@ -64,7 +64,7 @@ impl SortedParameters { (SortedParameters::from_iter(iter), item) } - pub fn join<'s, C, N>(current: C, new: N) -> String + pub(super) fn join<'s, C, N>(current: C, new: N) -> String where C: Iterator + Clone, N: Iterator + Clone, @@ -74,7 +74,7 @@ impl SortedParameters { into } - pub fn join_into<'s, C, N>(current: C, new: N, into: &mut String) + pub(super) fn join_into<'s, C, N>(current: C, new: N, into: &mut String) where C: Iterator + Clone, N: Iterator + Clone, @@ -86,34 +86,20 @@ impl SortedParameters { let iter = current.chain(new); SortedParameters::from_iter_into(iter, into); } - - pub fn is_sorted<'s, I>(iter: I) -> bool - where - I: Iterator, - { - let mut prev = None; - for (k, _) in iter { - match prev.take() { - Some(p) if k < p => return false, - _ => prev = Some(k), - } - } - true - } } /// Parameters provides an `HashMap<&str, &str>`-like view over a `&str` when `&str` follows the format `a=b;c=d|e;f=g`. 
-pub struct Parameters; +pub(super) struct Parameters; impl Parameters { - pub fn iter(s: &str) -> impl DoubleEndedIterator + Clone { + pub(super) fn iter(s: &str) -> impl DoubleEndedIterator + Clone { s.split(LIST_SEPARATOR) .filter(|p| !p.is_empty()) .map(|p| split_once(p, FIELD_SEPARATOR)) } #[allow(clippy::should_implement_trait)] - pub fn from_iter<'s, I>(iter: I) -> String + pub(super) fn from_iter<'s, I>(iter: I) -> String where I: Iterator, { @@ -122,20 +108,20 @@ impl Parameters { into } - pub fn from_iter_into<'s, I>(iter: I, into: &mut String) + pub(super) fn from_iter_into<'s, I>(iter: I, into: &mut String) where I: Iterator, { Self::concat_into(iter, into); } - pub fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> { + pub(super) fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> { Self::iter(s) .find(|(key, _)| *key == k) .map(|(_, value)| value) } - pub fn values<'s>(s: &'s str, k: &str) -> impl DoubleEndedIterator { + pub(super) fn values<'s>(s: &'s str, k: &str) -> impl DoubleEndedIterator { match Self::get(s, k) { Some(v) => v.split(VALUE_SEPARATOR), None => { @@ -146,7 +132,7 @@ impl Parameters { } } - pub fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) + pub(super) fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) where I: Iterator + Clone, { @@ -159,7 +145,7 @@ impl Parameters { (Parameters::from_iter(iter), item) } - pub fn remove<'s, I>(mut iter: I, k: &'s str) -> (String, Option<&'s str>) + pub(super) fn remove<'s, I>(mut iter: I, k: &'s str) -> (String, Option<&'s str>) where I: Iterator, { @@ -168,7 +154,7 @@ impl Parameters { (Parameters::concat(iter), item) } - pub fn join<'s, C, N>(current: C, new: N) -> String + pub(super) fn join<'s, C, N>(current: C, new: N) -> String where C: Iterator + Clone, N: Iterator + Clone, @@ -178,7 +164,7 @@ impl Parameters { into } - pub fn join_into<'s, C, N>(current: C, new: N, into: &mut String) + pub(super) fn join_into<'s, C, N>(current: C, new: N, into: &mut String) where C: Iterator + Clone, N: Iterator + Clone, @@ -218,8 +204,22 @@ impl Parameters { } } + pub(super) fn is_ordered<'s, I>(iter: I) -> bool + where + I: Iterator, + { + let mut prev = None; + for (k, _) in iter { + match prev.take() { + Some(p) if k < p => return false, + _ => prev = Some(k), + } + } + true + } + #[cfg(feature = "test")] - pub fn rand(into: &mut String) { + pub(super) fn rand(into: &mut String) { use rand::{ distributions::{Alphanumeric, DistString}, Rng, diff --git a/commons/zenoh-protocol/src/core/properties.rs b/commons/zenoh-protocol/src/core/properties.rs index 2e323925fc..e2f11e8814 100644 --- a/commons/zenoh-protocol/src/core/properties.rs +++ b/commons/zenoh-protocol/src/core/properties.rs @@ -11,25 +11,14 @@ // Contributors: // ZettaScale Zenoh Team, // +use super::parameters::{ + Parameters, SortedParameters, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR, +}; use alloc::borrow::Cow; use core::{borrow::Borrow, fmt}; #[cfg(feature = "std")] use std::collections::HashMap; -const LIST_SEPARATOR: char = ';'; -const FIELD_SEPARATOR: char = '='; -const VALUE_SEPARATOR: char = '|'; - -fn split_once(s: &str, c: char) -> (&str, &str) { - match s.find(c) { - Some(index) => { - let (l, r) = s.split_at(index); - (l, &r[1..]) - } - None => (s, ""), - } -} - /// A map of key/value (String,String) properties. /// It can be parsed from a String, using `;` or `` as separator between each properties /// and `=` as separator between a key and its value. Keys and values are trimed. 
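// Illustrative sketch (not part of the patch): a self-contained, std-only walkthrough of
// the `a=b;c=d|e;f=g` format described above. `;` separates entries, `=` separates a key
// from its value, and `|` separates multiple values; this mirrors the `split_once` helper
// and the iteration logic in this file.
#[test]
fn parameters_format_sketch() {
    let s = "a=b;c=d|e;f=g";
    let pairs: Vec<(&str, &str)> = s
        .split(';')
        .filter(|p| !p.is_empty())
        .map(|p| match p.find('=') {
            Some(i) => (&p[..i], &p[i + 1..]),
            None => (p, ""),
        })
        .collect();
    assert_eq!(pairs, [("a", "b"), ("c", "d|e"), ("f", "g")]);

    // A single value may carry several `|`-separated elements.
    let values: Vec<&str> = pairs[1].1.split('|').collect();
    assert_eq!(values, ["d", "e"]);
}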
@@ -61,17 +50,17 @@ fn split_once(s: &str, c: char) -> (&str, &str) { /// let pi = Properties::from_iter(vec![("a", "1"), ("b", "2"), ("c", "3|4|5"), ("d", "6")]); /// assert_eq!(p, pi); /// ``` -#[derive(Clone, PartialEq, Eq, Default)] +#[derive(Clone, PartialEq, Eq, Hash, Default)] pub struct Properties<'s>(Cow<'s, str>); -impl Properties<'_> { +impl<'s> Properties<'s> { /// Returns `true` if properties does not contain anything. pub fn is_empty(&self) -> bool { self.0.is_empty() } /// Returns properties as [`str`]. - pub fn as_str(&self) -> &str { + pub fn as_str(&'s self) -> &'s str { &self.0 } @@ -80,40 +69,28 @@ impl Properties<'_> { where K: Borrow, { - self.get(k).is_some() + Parameters::get(self.as_str(), k.borrow()).is_some() } /// Returns a reference to the `&str`-value corresponding to the key. - pub fn get(&self, k: K) -> Option<&str> + pub fn get(&'s self, k: K) -> Option<&'s str> where K: Borrow, { - self.iter() - .find(|(key, _)| *key == k.borrow()) - .map(|(_, value)| value) + Parameters::get(self.as_str(), k.borrow()) } /// Returns an iterator to the `&str`-values corresponding to the key. - pub fn values(&self, k: K) -> impl DoubleEndedIterator + pub fn values(&'s self, k: K) -> impl DoubleEndedIterator where K: Borrow, { - match self.get(k) { - Some(v) => v.split(VALUE_SEPARATOR), - None => { - let mut i = "".split(VALUE_SEPARATOR); - i.next(); - i - } - } + Parameters::values(self.as_str(), k.borrow()) } /// Returns an iterator on the key-value pairs as `(&str, &str)`. - pub fn iter(&self) -> impl DoubleEndedIterator + Clone { - self.as_str() - .split(LIST_SEPARATOR) - .filter(|p| !p.is_empty()) - .map(|p| split_once(p, FIELD_SEPARATOR)) + pub fn iter(&'s self) -> impl DoubleEndedIterator + Clone { + Parameters::iter(self.as_str()) } /// Inserts a key-value pair into the map. @@ -124,16 +101,9 @@ impl Properties<'_> { K: Borrow, V: Borrow, { - let item = self - .iter() - .find(|(key, _)| *key == k.borrow()) - .map(|(_, v)| v.to_string()); - - let current = self.iter().filter(|x| x.0 != k.borrow()); - let new = Some((k.borrow(), v.borrow())).into_iter(); - let iter = current.chain(new); - - *self = Self::from_iter(iter); + let (inner, item) = Parameters::insert(self.iter(), k.borrow(), v.borrow()); + let item = item.map(|i| i.to_string()); + self.0 = Cow::Owned(inner); item } @@ -142,13 +112,9 @@ impl Properties<'_> { where K: Borrow, { - let item = self - .iter() - .find(|(key, _)| *key == k.borrow()) - .map(|(_, v)| v.to_string()); - let iter = self.iter().filter(|x| x.0 != k.borrow()); - - *self = Self::from_iter(iter); + let (inner, item) = Parameters::remove(self.iter(), k.borrow()); + let item = item.map(|i| i.to_string()); + self.0 = Cow::Owned(inner); item } @@ -158,19 +124,14 @@ impl Properties<'_> { } /// Extend these properties from an iterator. - pub fn extend_from_iter<'s, I, K, V>(&mut self, iter: I) + pub fn extend_from_iter<'e, I, K, V>(&mut self, iter: I) where - I: Iterator + Clone, - K: Borrow + 's + ?Sized, - V: Borrow + 's + ?Sized, + I: Iterator + Clone, + K: Borrow + 'e + ?Sized, + V: Borrow + 'e + ?Sized, { - let new: I = iter.clone(); - let current = self - .iter() - .filter(|(kc, _)| !new.clone().any(|(kn, _)| *kc == kn.borrow())); - let iter = current.chain(iter.map(|(k, v)| (k.borrow(), v.borrow()))); - - *self = Self::from_iter(iter); + let inner = Parameters::join(self.iter(), iter.map(|(k, v)| (k.borrow(), v.borrow()))); + self.0 = Cow::Owned(inner); } /// Convert these properties into owned properties. 
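// Illustrative sketch (not part of the patch): the `extend` semantics used throughout this
// series. Entries coming from the iterator replace existing entries with the same key,
// everything else is kept. Written against the `extend_from_iter` signature above; the
// import path and test harness are assumptions.
use zenoh_protocol::core::Properties;

#[test]
fn extend_overrides_sketch() {
    let mut p = Properties::from("a=1;b=2");
    p.extend_from_iter([("b", "20"), ("c", "3")].iter().copied());
    assert_eq!(p.get("a"), Some("1")); // untouched
    assert_eq!(p.get("b"), Some("20")); // overridden
    assert_eq!(p.get("c"), Some("3")); // appended
}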
@@ -180,14 +141,7 @@ impl Properties<'_> { /// Returns true if all keys are sorted in alphabetical order. pub fn is_ordered(&self) -> bool { - let mut prev = None; - for (k, _) in self.iter() { - match prev.take() { - Some(p) if k < p => return false, - _ => prev = Some(k), - } - } - true + Parameters::is_ordered(self.iter()) } } @@ -225,29 +179,8 @@ where V: Borrow + 's + ?Sized, { fn from_iter>(iter: T) -> Self { - fn concat<'s, I>(iter: I) -> String - where - I: Iterator, - { - let mut into = String::new(); - let mut first = true; - for (k, v) in iter.filter(|(k, _)| !k.is_empty()) { - if !first { - into.push(LIST_SEPARATOR); - } - into.push_str(k); - if !v.is_empty() { - into.push(FIELD_SEPARATOR); - into.push_str(v); - } - first = false; - } - into - } - let iter = iter.into_iter(); - let inner = concat(iter.map(|(k, v)| (k.borrow(), v.borrow()))); - + let inner = Parameters::from_iter(iter.map(|(k, v)| (k.borrow(), v.borrow()))); Self(Cow::Owned(inner)) } } @@ -323,17 +256,17 @@ impl fmt::Debug for Properties<'_> { } } -#[derive(Clone, PartialEq, Eq, Default)] +#[derive(Clone, PartialEq, Eq, Hash, Default)] pub struct OrderedProperties<'s>(Properties<'s>); -impl OrderedProperties<'_> { +impl<'s> OrderedProperties<'s> { /// Returns `true` if properties does not contain anything. pub fn is_empty(&self) -> bool { self.0.is_empty() } /// Returns properties as [`str`]. - pub fn as_str(&self) -> &str { + pub fn as_str(&'s self) -> &'s str { self.0.as_str() } @@ -346,7 +279,7 @@ impl OrderedProperties<'_> { } /// Returns a reference to the `&str`-value corresponding to the key. - pub fn get(&self, k: K) -> Option<&str> + pub fn get(&'s self, k: K) -> Option<&'s str> where K: Borrow, { @@ -354,7 +287,7 @@ impl OrderedProperties<'_> { } /// Returns an iterator to the `&str`-values corresponding to the key. - pub fn values(&self, k: K) -> impl DoubleEndedIterator + pub fn values(&'s self, k: K) -> impl DoubleEndedIterator where K: Borrow, { @@ -362,7 +295,7 @@ impl OrderedProperties<'_> { } /// Returns an iterator on the key-value pairs as `(&str, &str)`. - pub fn iter(&self) -> impl DoubleEndedIterator + Clone { + pub fn iter(&'s self) -> impl DoubleEndedIterator + Clone { self.0.iter() } @@ -393,11 +326,11 @@ impl OrderedProperties<'_> { } /// Extend these properties from an iterator. 
- pub fn extend_from_iter<'s, I, K, V>(&mut self, iter: I) + pub fn extend_from_iter<'e, I, K, V>(&mut self, iter: I) where - I: Iterator + Clone, - K: Borrow + 's + ?Sized, - V: Borrow + 's + ?Sized, + I: Iterator + Clone, + K: Borrow + 'e + ?Sized, + V: Borrow + 'e + ?Sized, { self.0.extend_from_iter(iter); self.order(); @@ -410,9 +343,7 @@ impl OrderedProperties<'_> { fn order(&mut self) { if !self.0.is_ordered() { - let mut from = self.0.iter().collect::>(); - from.sort_unstable_by(|(k1, _), (k2, _)| k1.cmp(k2)); - self.0 = Properties::from_iter(from); + self.0 = Properties(Cow::Owned(SortedParameters::from_iter(self.iter()))); } } } From 7539346ae07b1063402ba7ee29fd39089cc4c097 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 18:31:13 +0200 Subject: [PATCH 231/357] Revert Properties depend on Parameters --- commons/zenoh-protocol/src/core/mod.rs | 1 + commons/zenoh-protocol/src/core/parameters.rs | 36 +++++++++---------- 2 files changed, 19 insertions(+), 18 deletions(-) diff --git a/commons/zenoh-protocol/src/core/mod.rs b/commons/zenoh-protocol/src/core/mod.rs index 1652d6bdad..0920d55d01 100644 --- a/commons/zenoh-protocol/src/core/mod.rs +++ b/commons/zenoh-protocol/src/core/mod.rs @@ -54,6 +54,7 @@ pub mod resolution; pub use resolution::*; pub mod parameters; +pub use parameters::*; pub mod properties; pub use properties::*; diff --git a/commons/zenoh-protocol/src/core/parameters.rs b/commons/zenoh-protocol/src/core/parameters.rs index 59aaa54e28..2ce430661c 100644 --- a/commons/zenoh-protocol/src/core/parameters.rs +++ b/commons/zenoh-protocol/src/core/parameters.rs @@ -29,11 +29,11 @@ fn split_once(s: &str, c: char) -> (&str, &str) { /// Parameters provides an `HashMap<&str, &str>`-like view over a `&str` when `&str` follows the format `a=b;c=d|e;f=g`. /// [`SortedParameters`] it's like [`Parameters`] but with the guarantee that keys are sorted upon insertion. -pub(super) struct SortedParameters; +pub struct SortedParameters; impl SortedParameters { #[allow(clippy::should_implement_trait)] - pub(super) fn from_iter<'s, I>(iter: I) -> String + pub fn from_iter<'s, I>(iter: I) -> String where I: Iterator, { @@ -42,7 +42,7 @@ impl SortedParameters { into } - pub(super) fn from_iter_into<'s, I>(iter: I, into: &mut String) + pub fn from_iter_into<'s, I>(iter: I, into: &mut String) where I: Iterator, { @@ -51,7 +51,7 @@ impl SortedParameters { Parameters::from_iter_into(from.iter().copied(), into); } - pub(super) fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) + pub fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) where I: Iterator + Clone, { @@ -64,7 +64,7 @@ impl SortedParameters { (SortedParameters::from_iter(iter), item) } - pub(super) fn join<'s, C, N>(current: C, new: N) -> String + pub fn join<'s, C, N>(current: C, new: N) -> String where C: Iterator + Clone, N: Iterator + Clone, @@ -74,7 +74,7 @@ impl SortedParameters { into } - pub(super) fn join_into<'s, C, N>(current: C, new: N, into: &mut String) + pub fn join_into<'s, C, N>(current: C, new: N, into: &mut String) where C: Iterator + Clone, N: Iterator + Clone, @@ -89,17 +89,17 @@ impl SortedParameters { } /// Parameters provides an `HashMap<&str, &str>`-like view over a `&str` when `&str` follows the format `a=b;c=d|e;f=g`. 
-pub(super) struct Parameters; +pub struct Parameters; impl Parameters { - pub(super) fn iter(s: &str) -> impl DoubleEndedIterator + Clone { + pub fn iter(s: &str) -> impl DoubleEndedIterator + Clone { s.split(LIST_SEPARATOR) .filter(|p| !p.is_empty()) .map(|p| split_once(p, FIELD_SEPARATOR)) } #[allow(clippy::should_implement_trait)] - pub(super) fn from_iter<'s, I>(iter: I) -> String + pub fn from_iter<'s, I>(iter: I) -> String where I: Iterator, { @@ -108,20 +108,20 @@ impl Parameters { into } - pub(super) fn from_iter_into<'s, I>(iter: I, into: &mut String) + pub fn from_iter_into<'s, I>(iter: I, into: &mut String) where I: Iterator, { Self::concat_into(iter, into); } - pub(super) fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> { + pub fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> { Self::iter(s) .find(|(key, _)| *key == k) .map(|(_, value)| value) } - pub(super) fn values<'s>(s: &'s str, k: &str) -> impl DoubleEndedIterator { + pub fn values<'s>(s: &'s str, k: &str) -> impl DoubleEndedIterator { match Self::get(s, k) { Some(v) => v.split(VALUE_SEPARATOR), None => { @@ -132,7 +132,7 @@ impl Parameters { } } - pub(super) fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) + pub fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) where I: Iterator + Clone, { @@ -145,7 +145,7 @@ impl Parameters { (Parameters::from_iter(iter), item) } - pub(super) fn remove<'s, I>(mut iter: I, k: &'s str) -> (String, Option<&'s str>) + pub fn remove<'s, I>(mut iter: I, k: &'s str) -> (String, Option<&'s str>) where I: Iterator, { @@ -154,7 +154,7 @@ impl Parameters { (Parameters::concat(iter), item) } - pub(super) fn join<'s, C, N>(current: C, new: N) -> String + pub fn join<'s, C, N>(current: C, new: N) -> String where C: Iterator + Clone, N: Iterator + Clone, @@ -164,7 +164,7 @@ impl Parameters { into } - pub(super) fn join_into<'s, C, N>(current: C, new: N, into: &mut String) + pub fn join_into<'s, C, N>(current: C, new: N, into: &mut String) where C: Iterator + Clone, N: Iterator + Clone, @@ -204,7 +204,7 @@ impl Parameters { } } - pub(super) fn is_ordered<'s, I>(iter: I) -> bool + pub fn is_ordered<'s, I>(iter: I) -> bool where I: Iterator, { @@ -219,7 +219,7 @@ impl Parameters { } #[cfg(feature = "test")] - pub(super) fn rand(into: &mut String) { + pub fn rand(into: &mut String) { use rand::{ distributions::{Alphanumeric, DistString}, Rng, From 28d32a6bf2a35a86f4b3969865f033259174705f Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 19:53:35 +0200 Subject: [PATCH 232/357] Remove SortedParameters --- commons/zenoh-protocol/src/core/endpoint.rs | 25 ++- commons/zenoh-protocol/src/core/parameters.rs | 169 +++++++++--------- commons/zenoh-protocol/src/core/properties.rs | 14 +- 3 files changed, 103 insertions(+), 105 deletions(-) diff --git a/commons/zenoh-protocol/src/core/endpoint.rs b/commons/zenoh-protocol/src/core/endpoint.rs index 3397147369..5c7cb891ae 100644 --- a/commons/zenoh-protocol/src/core/endpoint.rs +++ b/commons/zenoh-protocol/src/core/endpoint.rs @@ -11,10 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{ - locator::*, - parameters::{Parameters, SortedParameters}, -}; +use super::{locator::*, parameters::Parameters}; use alloc::{borrow::ToOwned, format, string::String}; use core::{borrow::Borrow, convert::TryFrom, fmt, str::FromStr}; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; @@ -251,7 +248,7 @@ impl MetadataMut<'_> { let ep = EndPoint::new( self.0.protocol(), 
self.0.address(), - SortedParameters::join( + Parameters::join_sort( self.0.metadata().iter(), iter.map(|(k, v)| (k.borrow(), v.borrow())), ), @@ -270,7 +267,7 @@ impl MetadataMut<'_> { let ep = EndPoint::new( self.0.protocol(), self.0.address(), - SortedParameters::insert(self.0.metadata().iter(), k.borrow(), v.borrow()).0, + Parameters::insert_sort(self.0.metadata().as_str(), k.borrow(), v.borrow()).0, self.0.config(), )?; @@ -285,7 +282,7 @@ impl MetadataMut<'_> { let ep = EndPoint::new( self.0.protocol(), self.0.address(), - Parameters::remove(self.0.metadata().iter(), k.borrow()).0, + Parameters::remove(self.0.metadata().as_str(), k.borrow()).0, self.0.config(), )?; @@ -382,7 +379,7 @@ impl ConfigMut<'_> { self.0.protocol(), self.0.address(), self.0.metadata(), - SortedParameters::join( + Parameters::join_sort( self.0.config().iter(), iter.map(|(k, v)| (k.borrow(), v.borrow())), ), @@ -401,7 +398,7 @@ impl ConfigMut<'_> { self.0.protocol(), self.0.address(), self.0.metadata(), - SortedParameters::insert(self.0.config().iter(), k.borrow(), v.borrow()).0, + Parameters::insert_sort(self.0.config().as_str(), k.borrow(), v.borrow()).0, )?; self.0.inner = ep.inner; @@ -416,7 +413,7 @@ impl ConfigMut<'_> { self.0.protocol(), self.0.address(), self.0.metadata(), - Parameters::remove(self.0.config().iter(), k.borrow()).0, + Parameters::remove(self.0.config().as_str(), k.borrow()).0, )?; self.0.inner = ep.inner; @@ -578,14 +575,14 @@ impl TryFrom for EndPoint { (Some(midx), None) if midx > pidx && !s[midx + 1..].is_empty() => { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..midx + 1]); // Includes metadata separator - SortedParameters::from_iter_into(Parameters::iter(&s[midx + 1..]), &mut inner); + Parameters::from_iter_sort_into(Parameters::iter(&s[midx + 1..]), &mut inner); Ok(EndPoint { inner }) } // There is some config (None, Some(cidx)) if cidx > pidx && !s[cidx + 1..].is_empty() => { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..cidx + 1]); // Includes config separator - SortedParameters::from_iter_into(Parameters::iter(&s[cidx + 1..]), &mut inner); + Parameters::from_iter_sort_into(Parameters::iter(&s[cidx + 1..]), &mut inner); Ok(EndPoint { inner }) } // There is some metadata and some config @@ -598,10 +595,10 @@ impl TryFrom for EndPoint { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..midx + 1]); // Includes metadata separator - SortedParameters::from_iter_into(Parameters::iter(&s[midx + 1..cidx]), &mut inner); + Parameters::from_iter_sort_into(Parameters::iter(&s[midx + 1..cidx]), &mut inner); inner.push(CONFIG_SEPARATOR); - SortedParameters::from_iter_into(Parameters::iter(&s[cidx + 1..]), &mut inner); + Parameters::from_iter_sort_into(Parameters::iter(&s[cidx + 1..]), &mut inner); Ok(EndPoint { inner }) } diff --git a/commons/zenoh-protocol/src/core/parameters.rs b/commons/zenoh-protocol/src/core/parameters.rs index 2ce430661c..adf744130b 100644 --- a/commons/zenoh-protocol/src/core/parameters.rs +++ b/commons/zenoh-protocol/src/core/parameters.rs @@ -28,52 +28,73 @@ fn split_once(s: &str, c: char) -> (&str, &str) { } /// Parameters provides an `HashMap<&str, &str>`-like view over a `&str` when `&str` follows the format `a=b;c=d|e;f=g`. -/// [`SortedParameters`] it's like [`Parameters`] but with the guarantee that keys are sorted upon insertion. -pub struct SortedParameters; +/// +/// `;` is the separator between the key-value `(&str, &str)` elements. 
+/// +/// `=` is the separator between the `&str`-key and `&str`-value +/// +/// `|` is the separator between multiple elements of the values. +pub struct Parameters; -impl SortedParameters { +impl Parameters { + /// Returns an iterator of key-value `(&str, &str)` pairs according to the parameters format. + pub fn iter(s: &str) -> impl DoubleEndedIterator + Clone { + s.split(LIST_SEPARATOR) + .filter(|p| !p.is_empty()) + .map(|p| split_once(p, FIELD_SEPARATOR)) + } + + /// Builds a string from an iterator preserving the order. #[allow(clippy::should_implement_trait)] pub fn from_iter<'s, I>(iter: I) -> String where I: Iterator, { let mut into = String::new(); - Self::from_iter_into(iter, &mut into); + Parameters::from_iter_into(iter, &mut into); into } + /// Same as [`Self::from_iter`] but it writes into a user-provided string instead of allocating a new one. pub fn from_iter_into<'s, I>(iter: I, into: &mut String) where I: Iterator, { - let mut from = iter.collect::>(); - from.sort_unstable_by(|(k1, _), (k2, _)| k1.cmp(k2)); - Parameters::from_iter_into(from.iter().copied(), into); + Parameters::concat_into(iter, into); } - pub fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) + /// Same as [`Self::from_iter`] but keys are sorted in alphabetical order. + pub fn from_iter_sort<'s, I>(iter: I) -> String where - I: Iterator + Clone, + I: Iterator, { - let mut ic = iter.clone(); - let item = ic.find(|(key, _)| *key == k).map(|(_, v)| v); + let mut into = String::new(); + Parameters::from_iter_into(iter, &mut into); + into + } - let current = iter.filter(|x| x.0 != k); - let new = Some((k, v)).into_iter(); - let iter = current.chain(new); - (SortedParameters::from_iter(iter), item) + /// Same as [`Self::from_iter_into`] but keys are sorted in alphabetical order. + pub fn from_iter_sort_into<'s, I>(iter: I, into: &mut String) + where + I: Iterator, + { + let mut from = iter.collect::>(); + from.sort_unstable_by(|(k1, _), (k2, _)| k1.cmp(k2)); + Parameters::from_iter_into(from.iter().copied(), into); } + /// Builds a string by joining two key-value `(&str, &str)` iterators removing from `current` any element whose key is present in `new`. pub fn join<'s, C, N>(current: C, new: N) -> String where C: Iterator + Clone, N: Iterator + Clone, { let mut into = String::new(); - SortedParameters::join_into(current, new, &mut into); + Parameters::join_into(current, new, &mut into); into } + /// Same as [`Self::join`] but it writes into a user-provided string instead of allocating a new one. pub fn join_into<'s, C, N>(current: C, new: N, into: &mut String) where C: Iterator + Clone, @@ -84,45 +105,44 @@ impl SortedParameters { .clone() .filter(|(kc, _)| !n.clone().any(|(kn, _)| kn == *kc)); let iter = current.chain(new); - SortedParameters::from_iter_into(iter, into); - } -} - -/// Parameters provides an `HashMap<&str, &str>`-like view over a `&str` when `&str` follows the format `a=b;c=d|e;f=g`. -pub struct Parameters; - -impl Parameters { - pub fn iter(s: &str) -> impl DoubleEndedIterator + Clone { - s.split(LIST_SEPARATOR) - .filter(|p| !p.is_empty()) - .map(|p| split_once(p, FIELD_SEPARATOR)) + Parameters::from_iter_into(iter, into); } - #[allow(clippy::should_implement_trait)] - pub fn from_iter<'s, I>(iter: I) -> String + /// Same as [`Self::join`] but keys are sorted in alphabetical order. 
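// Illustrative sketch (not part of the patch): the `join` semantics as defined at this
// point in the series. Any key redefined by `new` replaces the one in `current`, and the
// result is rendered back into a parameters string. (A later commit in this same series
// reworks `join` to return an iterator instead of a String.) The import path and test
// harness are assumptions.
use zenoh_protocol::core::Parameters;

#[test]
fn join_overrides_sketch() {
    let current = "a=1;b=2";
    let new = "b=20;c=3";
    let joined = Parameters::join(Parameters::iter(current), Parameters::iter(new));
    assert_eq!(joined, "a=1;b=20;c=3");
}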
+ pub fn join_sort<'s, C, N>(current: C, new: N) -> String where - I: Iterator, + C: Iterator + Clone, + N: Iterator + Clone, { let mut into = String::new(); - Self::from_iter_into(iter, &mut into); + Parameters::join_sort_into(current, new, &mut into); into } - pub fn from_iter_into<'s, I>(iter: I, into: &mut String) + /// Same as [`Self::join_into`] but keys are sorted in alphabetical order. + pub fn join_sort_into<'s, C, N>(current: C, new: N, into: &mut String) where - I: Iterator, + C: Iterator + Clone, + N: Iterator + Clone, { - Self::concat_into(iter, into); + let n = new.clone(); + let current = current + .clone() + .filter(|(kc, _)| !n.clone().any(|(kn, _)| kn == *kc)); + let iter = current.chain(new); + Parameters::from_iter_into(iter, into); } + /// Get the a `&str`-value for a `&str`-key according to the parameters format. pub fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> { - Self::iter(s) + Parameters::iter(s) .find(|(key, _)| *key == k) .map(|(_, value)| value) } + /// Get the a `&str`-value iterator for a `&str`-key according to the parameters format. pub fn values<'s>(s: &'s str, k: &str) -> impl DoubleEndedIterator { - match Self::get(s, k) { + match Parameters::get(s, k) { Some(v) => v.split(VALUE_SEPARATOR), None => { let mut i = "".split(VALUE_SEPARATOR); @@ -132,49 +152,46 @@ impl Parameters { } } - pub fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> (String, Option<&'s str>) - where - I: Iterator + Clone, - { - let mut ic = iter.clone(); - let item = ic.find(|(key, _)| *key == k).map(|(_, v)| v); + /// Insert a key-value `(&str, &str)` pair by appending it at the end of `s` preserving the insertion order. + pub fn insert<'s>(s: &'s str, k: &str, v: &str) -> (String, Option<&'s str>) { + let mut iter = Parameters::iter(s); + let item = iter.find(|(key, _)| *key == k).map(|(_, v)| v); - let current = iter.filter(|x| x.0 != k); + let current = Parameters::iter(s).filter(|x| x.0 != k); let new = Some((k, v)).into_iter(); let iter = current.chain(new); (Parameters::from_iter(iter), item) } - pub fn remove<'s, I>(mut iter: I, k: &'s str) -> (String, Option<&'s str>) - where - I: Iterator, - { + /// Same as [`Self::insert`] but keys are sorted in alphabetical order. + pub fn insert_sort<'s>(s: &'s str, k: &'s str, v: &'s str) -> (String, Option<&'s str>) { + let mut iter = Parameters::iter(s); let item = iter.find(|(key, _)| *key == k).map(|(_, v)| v); - let iter = iter.filter(|x| x.0 != k); - (Parameters::concat(iter), item) + + let current = Parameters::iter(s).filter(|x| x.0 != k); + let new = Some((k, v)).into_iter(); + let iter = current.chain(new); + (Parameters::from_iter_sort(iter), item) } - pub fn join<'s, C, N>(current: C, new: N) -> String - where - C: Iterator + Clone, - N: Iterator + Clone, - { - let mut into = String::new(); - Parameters::join_into(current, new, &mut into); - into + /// Remove a key-value `(&str, &str)` pair from `s` preserving the insertion order. 
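// Illustrative sketch (not part of the patch): the string-level editing helpers added in
// this commit. `insert` appends while preserving the existing order, `insert_sort`
// re-renders with keys sorted, and `remove` drops an entry; each also reports the previous
// value for the key, if any. Import path and test harness are assumptions.
use zenoh_protocol::core::Parameters;

#[test]
fn parameters_edit_sketch() {
    let (appended, previous) = Parameters::insert("b=2;a=1", "c", "3");
    assert_eq!(appended, "b=2;a=1;c=3");
    assert_eq!(previous, None);

    let (sorted, _) = Parameters::insert_sort("b=2;a=1", "c", "3");
    assert_eq!(sorted, "a=1;b=2;c=3");

    let (remaining, removed) = Parameters::remove("a=1;b=2", "a");
    assert_eq!(remaining, "b=2");
    assert_eq!(removed, Some("1"));
}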
+ pub fn remove<'s>(s: &'s str, k: &str) -> (String, Option<&'s str>) { + let mut iter = Parameters::iter(s); + let item = iter.find(|(key, _)| *key == k).map(|(_, v)| v); + let iter = iter.filter(|x| x.0 != k); + (Parameters::concat(iter), item) } - pub fn join_into<'s, C, N>(current: C, new: N, into: &mut String) - where - C: Iterator + Clone, - N: Iterator + Clone, - { - let n = new.clone(); - let current = current - .clone() - .filter(|(kc, _)| !n.clone().any(|(kn, _)| kn == *kc)); - let iter = current.chain(new); - Parameters::from_iter_into(iter, into); + /// Returns `true` if all keys are sorted in alphabetical order + pub fn is_ordered(s: &str) -> bool { + let mut prev = None; + for (k, _) in Parameters::iter(s) { + match prev.take() { + Some(p) if k < p => return false, + _ => prev = Some(k), + } + } + true } fn concat<'s, I>(iter: I) -> String @@ -204,20 +221,6 @@ impl Parameters { } } - pub fn is_ordered<'s, I>(iter: I) -> bool - where - I: Iterator, - { - let mut prev = None; - for (k, _) in iter { - match prev.take() { - Some(p) if k < p => return false, - _ => prev = Some(k), - } - } - true - } - #[cfg(feature = "test")] pub fn rand(into: &mut String) { use rand::{ diff --git a/commons/zenoh-protocol/src/core/properties.rs b/commons/zenoh-protocol/src/core/properties.rs index e2f11e8814..2611d2d7a9 100644 --- a/commons/zenoh-protocol/src/core/properties.rs +++ b/commons/zenoh-protocol/src/core/properties.rs @@ -11,9 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::parameters::{ - Parameters, SortedParameters, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR, -}; +use super::parameters::{Parameters, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR}; use alloc::borrow::Cow; use core::{borrow::Borrow, fmt}; #[cfg(feature = "std")] @@ -101,7 +99,7 @@ impl<'s> Properties<'s> { K: Borrow, V: Borrow, { - let (inner, item) = Parameters::insert(self.iter(), k.borrow(), v.borrow()); + let (inner, item) = Parameters::insert(self.as_str(), k.borrow(), v.borrow()); let item = item.map(|i| i.to_string()); self.0 = Cow::Owned(inner); item @@ -112,7 +110,7 @@ impl<'s> Properties<'s> { where K: Borrow, { - let (inner, item) = Parameters::remove(self.iter(), k.borrow()); + let (inner, item) = Parameters::remove(self.as_str(), k.borrow()); let item = item.map(|i| i.to_string()); self.0 = Cow::Owned(inner); item @@ -139,9 +137,9 @@ impl<'s> Properties<'s> { Properties(Cow::Owned(self.0.into_owned())) } - /// Returns true if all keys are sorted in alphabetical order. + /// Returns `true`` if all keys are sorted in alphabetical order. 
pub fn is_ordered(&self) -> bool { - Parameters::is_ordered(self.iter()) + Parameters::is_ordered(self.as_str()) } } @@ -343,7 +341,7 @@ impl<'s> OrderedProperties<'s> { fn order(&mut self) { if !self.0.is_ordered() { - self.0 = Properties(Cow::Owned(SortedParameters::from_iter(self.iter()))); + self.0 = Properties(Cow::Owned(Parameters::from_iter_sort(self.iter()))); } } } From 8e20b019d5e4eec709915b84b02bd569474d476b Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 19:56:31 +0200 Subject: [PATCH 233/357] Rename config and metadata join functions to extend_from_iter --- commons/zenoh-protocol/src/core/endpoint.rs | 8 ++++---- io/zenoh-transport/src/multicast/manager.rs | 4 +++- io/zenoh-transport/src/unicast/manager.rs | 8 ++++++-- io/zenoh-transport/tests/endpoints.rs | 4 ++-- .../tests/unicast_authenticator.rs | 4 ++-- io/zenoh-transport/tests/unicast_multilink.rs | 4 ++-- io/zenoh-transport/tests/unicast_openclose.rs | 4 ++-- io/zenoh-transport/tests/unicast_time.rs | 4 ++-- io/zenoh-transport/tests/unicast_transport.rs | 16 ++++++++-------- 9 files changed, 31 insertions(+), 25 deletions(-) diff --git a/commons/zenoh-protocol/src/core/endpoint.rs b/commons/zenoh-protocol/src/core/endpoint.rs index 5c7cb891ae..735e329146 100644 --- a/commons/zenoh-protocol/src/core/endpoint.rs +++ b/commons/zenoh-protocol/src/core/endpoint.rs @@ -239,7 +239,7 @@ impl<'a> MetadataMut<'a> { } impl MetadataMut<'_> { - pub fn join<'s, I, K, V>(&mut self, iter: I) -> ZResult<()> + pub fn extend_from_iter<'s, I, K, V>(&mut self, iter: I) -> ZResult<()> where I: Iterator + Clone, K: Borrow + 's + ?Sized, @@ -369,7 +369,7 @@ impl<'a> ConfigMut<'a> { } impl ConfigMut<'_> { - pub fn join<'s, I, K, V>(&mut self, iter: I) -> ZResult<()> + pub fn extend_from_iter<'s, I, K, V>(&mut self, iter: I) -> ZResult<()> where I: Iterator + Clone, K: Borrow + 's + ?Sized, @@ -817,14 +817,14 @@ fn endpoints() { let mut endpoint = EndPoint::from_str("udp/127.0.0.1:7447").unwrap(); endpoint .metadata_mut() - .join([("a", "1"), ("c", "3"), ("b", "2")].iter().copied()) + .extend_from_iter([("a", "1"), ("c", "3"), ("b", "2")].iter().copied()) .unwrap(); assert_eq!(endpoint.as_str(), "udp/127.0.0.1:7447?a=1;b=2;c=3"); let mut endpoint = EndPoint::from_str("udp/127.0.0.1:7447").unwrap(); endpoint .config_mut() - .join([("A", "1"), ("C", "3"), ("B", "2")].iter().copied()) + .extend_from_iter([("A", "1"), ("C", "3"), ("B", "2")].iter().copied()) .unwrap(); assert_eq!(endpoint.as_str(), "udp/127.0.0.1:7447#A=1;B=2;C=3"); diff --git a/io/zenoh-transport/src/multicast/manager.rs b/io/zenoh-transport/src/multicast/manager.rs index 421664e954..173475cf55 100644 --- a/io/zenoh-transport/src/multicast/manager.rs +++ b/io/zenoh-transport/src/multicast/manager.rs @@ -261,7 +261,9 @@ impl TransportManager { .await?; // Fill and merge the endpoint configuration if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { - endpoint.config_mut().join(Parameters::iter(config))?; + endpoint + .config_mut() + .extend_from_iter(Parameters::iter(config))?; } // Open the link diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index ab31376788..eec9d05386 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -379,7 +379,9 @@ impl TransportManager { .await?; // Fill and merge the endpoint configuration if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { - 
endpoint.config_mut().join(Parameters::iter(config))?; + endpoint + .config_mut() + .extend_from_iter(Parameters::iter(config))?; }; manager.new_listener(endpoint).await } @@ -688,7 +690,9 @@ impl TransportManager { .await?; // Fill and merge the endpoint configuration if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { - endpoint.config_mut().join(Parameters::iter(config))?; + endpoint + .config_mut() + .extend_from_iter(Parameters::iter(config))?; }; // Create a new link associated by calling the Link Manager diff --git a/io/zenoh-transport/tests/endpoints.rs b/io/zenoh-transport/tests/endpoints.rs index def493e88f..50b2b80ff0 100644 --- a/io/zenoh-transport/tests/endpoints.rs +++ b/io/zenoh-transport/tests/endpoints.rs @@ -317,7 +317,7 @@ AXVFFIgCSluyrolaD6CWD9MqOex4YOfJR2bNxI7lFvuK4AwjyUJzT1U1HXib17mM let mut endpoint: EndPoint = format!("tls/localhost:{}", 7070).parse().unwrap(); endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_SERVER_CERTIFICATE_RAW, cert), (TLS_SERVER_PRIVATE_KEY_RAW, key), @@ -396,7 +396,7 @@ AXVFFIgCSluyrolaD6CWD9MqOex4YOfJR2bNxI7lFvuK4AwjyUJzT1U1HXib17mM let mut endpoint: EndPoint = format!("quic/localhost:{}", 7080).parse().unwrap(); endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_SERVER_CERTIFICATE_RAW, cert), (TLS_SERVER_PRIVATE_KEY_RAW, key), diff --git a/io/zenoh-transport/tests/unicast_authenticator.rs b/io/zenoh-transport/tests/unicast_authenticator.rs index 63f1c785b7..0f31ef2453 100644 --- a/io/zenoh-transport/tests/unicast_authenticator.rs +++ b/io/zenoh-transport/tests/unicast_authenticator.rs @@ -802,7 +802,7 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("tls/localhost:{}", 8030).parse().unwrap(); endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_CERTIFICATE_RAW, cert), @@ -902,7 +902,7 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("quic/localhost:{}", 8040).parse().unwrap(); endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_CERTIFICATE_RAW, cert), diff --git a/io/zenoh-transport/tests/unicast_multilink.rs b/io/zenoh-transport/tests/unicast_multilink.rs index 54a31f62c3..99eb651b75 100644 --- a/io/zenoh-transport/tests/unicast_multilink.rs +++ b/io/zenoh-transport/tests/unicast_multilink.rs @@ -611,7 +611,7 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("tls/localhost:{}", 18030).parse().unwrap(); endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), @@ -709,7 +709,7 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("quic/localhost:{}", 18040).parse().unwrap(); endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), diff --git a/io/zenoh-transport/tests/unicast_openclose.rs b/io/zenoh-transport/tests/unicast_openclose.rs index 3f57ebfd62..4071ab5c1d 100644 --- a/io/zenoh-transport/tests/unicast_openclose.rs +++ b/io/zenoh-transport/tests/unicast_openclose.rs @@ -639,7 +639,7 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("tls/localhost:{}", 13030).parse().unwrap(); endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), @@ -737,7 +737,7 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: 
EndPoint = format!("quic/localhost:{}", 13040).parse().unwrap(); endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), diff --git a/io/zenoh-transport/tests/unicast_time.rs b/io/zenoh-transport/tests/unicast_time.rs index 668df34cd6..12b29be1b5 100644 --- a/io/zenoh-transport/tests/unicast_time.rs +++ b/io/zenoh-transport/tests/unicast_time.rs @@ -398,7 +398,7 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("tls/localhost:{}", 13030).parse().unwrap(); endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), @@ -497,7 +497,7 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("quic/localhost:{}", 13040).parse().unwrap(); endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index 2fffb2f811..0b150c0feb 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -994,7 +994,7 @@ async fn transport_unicast_tls_only_server() { let mut endpoint: EndPoint = format!("tls/localhost:{}", 16070).parse().unwrap(); endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), @@ -1039,7 +1039,7 @@ async fn transport_unicast_quic_only_server() { let mut endpoint: EndPoint = format!("quic/localhost:{}", 16080).parse().unwrap(); endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), @@ -1087,7 +1087,7 @@ async fn transport_unicast_tls_only_mutual_success() { let mut client_endpoint: EndPoint = ("tls/localhost:10461").parse().unwrap(); client_endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA), (TLS_CLIENT_CERTIFICATE_RAW, CLIENT_CERT), @@ -1103,7 +1103,7 @@ async fn transport_unicast_tls_only_mutual_success() { let mut server_endpoint: EndPoint = ("tls/localhost:10461").parse().unwrap(); server_endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, CLIENT_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), @@ -1157,14 +1157,14 @@ async fn transport_unicast_tls_only_mutual_no_client_certs_failure() { let mut client_endpoint: EndPoint = ("tls/localhost:10462").parse().unwrap(); client_endpoint .config_mut() - .join([(TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA)].iter().copied()) + .extend_from_iter([(TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA)].iter().copied()) .unwrap(); // Define the locator let mut server_endpoint: EndPoint = ("tls/localhost:10462").parse().unwrap(); server_endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, CLIENT_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), @@ -1223,7 +1223,7 @@ fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { let mut client_endpoint: EndPoint = ("tls/localhost:10463").parse().unwrap(); client_endpoint .config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA), // Using the SERVER_CERT and SERVER_KEY in the client to simulate the case the client has @@ -1243,7 +1243,7 @@ fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { let mut server_endpoint: EndPoint = ("tls/localhost:10463").parse().unwrap(); server_endpoint 
.config_mut() - .join( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, CLIENT_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), From eed331e21bb09a6e52e82a768d683aa35e29d338 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 20:10:50 +0200 Subject: [PATCH 234/357] Selector new --- plugins/zenoh-plugin-rest/src/lib.rs | 2 +- zenoh/src/selector.rs | 36 +++++++++++++++++++++------- 2 files changed, 28 insertions(+), 10 deletions(-) diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index fc74ca5421..06341e46a7 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -403,7 +403,7 @@ async fn query(mut req: Request<(Arc, String)>) -> tide::Result { pub const TIME_RANGE_KEY: &str = "_time"; impl<'a> Selector<'a> { + /// Builds a new selector + pub fn new(key_expr: K, parameters: P) -> Self + where + K: Into>, + P: Into>, + { + Self { + key_expr: key_expr.into(), + parameters: parameters.into(), + } + } + /// Gets the parameters. pub fn parameters(&self) -> &Parameters { &self.parameters @@ -85,11 +97,20 @@ impl<'a> Selector<'a> { &mut self.parameters } + /// Sets the `parameters` part of this `Selector`. + #[inline(always)] + pub fn set_parameters
<P>
(&mut self, parameters: P) + where + P: Into>, + { + self.parameters = parameters.into(); + } + /// Create an owned version of this selector with `'static` lifetime. pub fn into_owned(self) -> Selector<'static> { Selector { key_expr: self.key_expr.into_owned(), - parameters: Parameters(self.parameters.0.into_owned()), + parameters: self.parameters.into_owned(), } } @@ -104,14 +125,6 @@ impl<'a> Selector<'a> { self.parameters_mut().set_time_range(time_range); } - #[zenoh_macros::unstable] - /// Sets the `parameters` part of this `Selector`. - #[inline(always)] - pub fn with_parameters(mut self, parameters: &'a str) -> Self { - self.parameters = parameters.into(); - self - } - #[zenoh_macros::unstable] /// Extracts the standardized `_time` argument from the selector parameters. /// @@ -192,6 +205,11 @@ impl From> for HashMap { } impl Parameters<'_> { + /// Create an owned version of these parameters with `'static` lifetime. + pub fn into_owned(self) -> Parameters<'static> { + Parameters(self.0.into_owned()) + } + #[zenoh_macros::unstable] /// Sets the time range targeted by the selector. pub fn set_time_range>>(&mut self, time_range: T) { From 9033e69aa49e531dad24cf430c95d696e4dceb8e Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 21:18:34 +0200 Subject: [PATCH 235/357] Improve parameters --- commons/zenoh-protocol/src/core/endpoint.rs | 28 +++-- commons/zenoh-protocol/src/core/parameters.rs | 109 ++++++------------ commons/zenoh-protocol/src/core/properties.rs | 9 +- 3 files changed, 61 insertions(+), 85 deletions(-) diff --git a/commons/zenoh-protocol/src/core/endpoint.rs b/commons/zenoh-protocol/src/core/endpoint.rs index 735e329146..a61fdd8e89 100644 --- a/commons/zenoh-protocol/src/core/endpoint.rs +++ b/commons/zenoh-protocol/src/core/endpoint.rs @@ -248,10 +248,10 @@ impl MetadataMut<'_> { let ep = EndPoint::new( self.0.protocol(), self.0.address(), - Parameters::join_sort( + Parameters::from_iter(Parameters::sort(Parameters::join( self.0.metadata().iter(), iter.map(|(k, v)| (k.borrow(), v.borrow())), - ), + ))), self.0.config(), )?; @@ -379,10 +379,10 @@ impl ConfigMut<'_> { self.0.protocol(), self.0.address(), self.0.metadata(), - Parameters::join_sort( + Parameters::from_iter(Parameters::sort(Parameters::join( self.0.config().iter(), iter.map(|(k, v)| (k.borrow(), v.borrow())), - ), + ))), )?; self.0.inner = ep.inner; @@ -575,14 +575,20 @@ impl TryFrom for EndPoint { (Some(midx), None) if midx > pidx && !s[midx + 1..].is_empty() => { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..midx + 1]); // Includes metadata separator - Parameters::from_iter_sort_into(Parameters::iter(&s[midx + 1..]), &mut inner); + Parameters::from_iter_into( + Parameters::sort(Parameters::iter(&s[midx + 1..])), + &mut inner, + ); Ok(EndPoint { inner }) } // There is some config (None, Some(cidx)) if cidx > pidx && !s[cidx + 1..].is_empty() => { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..cidx + 1]); // Includes config separator - Parameters::from_iter_sort_into(Parameters::iter(&s[cidx + 1..]), &mut inner); + Parameters::from_iter_into( + Parameters::sort(Parameters::iter(&s[cidx + 1..])), + &mut inner, + ); Ok(EndPoint { inner }) } // There is some metadata and some config @@ -595,10 +601,16 @@ impl TryFrom for EndPoint { let mut inner = String::with_capacity(s.len()); inner.push_str(&s[..midx + 1]); // Includes metadata separator - Parameters::from_iter_sort_into(Parameters::iter(&s[midx + 1..cidx]), &mut inner); + Parameters::from_iter_into( 
+ Parameters::sort(Parameters::iter(&s[midx + 1..cidx])), + &mut inner, + ); inner.push(CONFIG_SEPARATOR); - Parameters::from_iter_sort_into(Parameters::iter(&s[cidx + 1..]), &mut inner); + Parameters::from_iter_into( + Parameters::sort(Parameters::iter(&s[cidx + 1..])), + &mut inner, + ); Ok(EndPoint { inner }) } diff --git a/commons/zenoh-protocol/src/core/parameters.rs b/commons/zenoh-protocol/src/core/parameters.rs index adf744130b..e4f815feff 100644 --- a/commons/zenoh-protocol/src/core/parameters.rs +++ b/commons/zenoh-protocol/src/core/parameters.rs @@ -44,93 +44,46 @@ impl Parameters { .map(|p| split_once(p, FIELD_SEPARATOR)) } - /// Builds a string from an iterator preserving the order. - #[allow(clippy::should_implement_trait)] - pub fn from_iter<'s, I>(iter: I) -> String - where - I: Iterator, - { - let mut into = String::new(); - Parameters::from_iter_into(iter, &mut into); - into - } - - /// Same as [`Self::from_iter`] but it writes into a user-provided string instead of allocating a new one. - pub fn from_iter_into<'s, I>(iter: I, into: &mut String) - where - I: Iterator, - { - Parameters::concat_into(iter, into); - } - - /// Same as [`Self::from_iter`] but keys are sorted in alphabetical order. - pub fn from_iter_sort<'s, I>(iter: I) -> String - where - I: Iterator, - { - let mut into = String::new(); - Parameters::from_iter_into(iter, &mut into); - into - } - /// Same as [`Self::from_iter_into`] but keys are sorted in alphabetical order. - pub fn from_iter_sort_into<'s, I>(iter: I, into: &mut String) + pub fn sort<'s, I>(iter: I) -> impl Iterator where I: Iterator, { let mut from = iter.collect::>(); from.sort_unstable_by(|(k1, _), (k2, _)| k1.cmp(k2)); - Parameters::from_iter_into(from.iter().copied(), into); + from.into_iter() } - /// Builds a string by joining two key-value `(&str, &str)` iterators removing from `current` any element whose key is present in `new`. - pub fn join<'s, C, N>(current: C, new: N) -> String + /// Joins two key-value `(&str, &str)` iterators removing from `current` any element whose key is present in `new`. + pub fn join<'s, C, N>(current: C, new: N) -> impl Iterator + Clone where C: Iterator + Clone, - N: Iterator + Clone, - { - let mut into = String::new(); - Parameters::join_into(current, new, &mut into); - into - } - - /// Same as [`Self::join`] but it writes into a user-provided string instead of allocating a new one. - pub fn join_into<'s, C, N>(current: C, new: N, into: &mut String) - where - C: Iterator + Clone, - N: Iterator + Clone, + N: Iterator + Clone + 's, { let n = new.clone(); let current = current .clone() - .filter(|(kc, _)| !n.clone().any(|(kn, _)| kn == *kc)); - let iter = current.chain(new); - Parameters::from_iter_into(iter, into); + .filter(move |(kc, _)| !n.clone().any(|(kn, _)| kn == *kc)); + current.chain(new) } - /// Same as [`Self::join`] but keys are sorted in alphabetical order. - pub fn join_sort<'s, C, N>(current: C, new: N) -> String + /// Builds a string from an iterator preserving the order. + #[allow(clippy::should_implement_trait)] + pub fn from_iter<'s, I>(iter: I) -> String where - C: Iterator + Clone, - N: Iterator + Clone, + I: Iterator, { let mut into = String::new(); - Parameters::join_sort_into(current, new, &mut into); + Parameters::from_iter_into(iter, &mut into); into } - /// Same as [`Self::join_into`] but keys are sorted in alphabetical order. 
- pub fn join_sort_into<'s, C, N>(current: C, new: N, into: &mut String) + /// Same as [`Self::from_iter`] but it writes into a user-provided string instead of allocating a new one. + pub fn from_iter_into<'s, I>(iter: I, into: &mut String) where - C: Iterator + Clone, - N: Iterator + Clone, + I: Iterator, { - let n = new.clone(); - let current = current - .clone() - .filter(|(kc, _)| !n.clone().any(|(kn, _)| kn == *kc)); - let iter = current.chain(new); - Parameters::from_iter_into(iter, into); + Parameters::concat_into(iter, into); } /// Get the a `&str`-value for a `&str`-key according to the parameters format. @@ -152,26 +105,32 @@ impl Parameters { } } - /// Insert a key-value `(&str, &str)` pair by appending it at the end of `s` preserving the insertion order. - pub fn insert<'s>(s: &'s str, k: &str, v: &str) -> (String, Option<&'s str>) { - let mut iter = Parameters::iter(s); + fn _insert<'s, I>( + i: I, + k: &'s str, + v: &'s str, + ) -> (impl Iterator, Option<&'s str>) + where + I: Iterator + Clone, + { + let mut iter = i.clone(); let item = iter.find(|(key, _)| *key == k).map(|(_, v)| v); - let current = Parameters::iter(s).filter(|x| x.0 != k); + let current = i.filter(move |x| x.0 != k); let new = Some((k, v)).into_iter(); - let iter = current.chain(new); + (current.chain(new), item) + } + + /// Insert a key-value `(&str, &str)` pair by appending it at the end of `s` preserving the insertion order. + pub fn insert<'s>(s: &'s str, k: &'s str, v: &'s str) -> (String, Option<&'s str>) { + let (iter, item) = Parameters::_insert(Parameters::iter(s), k, v); (Parameters::from_iter(iter), item) } /// Same as [`Self::insert`] but keys are sorted in alphabetical order. pub fn insert_sort<'s>(s: &'s str, k: &'s str, v: &'s str) -> (String, Option<&'s str>) { - let mut iter = Parameters::iter(s); - let item = iter.find(|(key, _)| *key == k).map(|(_, v)| v); - - let current = Parameters::iter(s).filter(|x| x.0 != k); - let new = Some((k, v)).into_iter(); - let iter = current.chain(new); - (Parameters::from_iter_sort(iter), item) + let (iter, item) = Parameters::_insert(Parameters::iter(s), k, v); + (Parameters::from_iter(Parameters::sort(iter)), item) } /// Remove a key-value `(&str, &str)` pair from `s` preserving the insertion order. 
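// Illustration (a minimal sketch, not taken from the patch itself): with the refactor above,
// sorting and joining become plain iterator adapters that compose with `Parameters::from_iter`,
// replacing the dedicated `from_iter_sort`/`join_sort` string builders. Assuming the `;`/`=`
// separators shown elsewhere in this series:
let merged = Parameters::from_iter(Parameters::sort(Parameters::join(
    Parameters::iter("a=1;b=2"),
    Parameters::iter("b=3;c=4"),
)));
assert_eq!(merged, "a=1;b=3;c=4"); // the `b` entry from `current` is replaced by the one from `new`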
diff --git a/commons/zenoh-protocol/src/core/properties.rs b/commons/zenoh-protocol/src/core/properties.rs index 2611d2d7a9..3c83d00e75 100644 --- a/commons/zenoh-protocol/src/core/properties.rs +++ b/commons/zenoh-protocol/src/core/properties.rs @@ -128,7 +128,10 @@ impl<'s> Properties<'s> { K: Borrow + 'e + ?Sized, V: Borrow + 'e + ?Sized, { - let inner = Parameters::join(self.iter(), iter.map(|(k, v)| (k.borrow(), v.borrow()))); + let inner = Parameters::from_iter(Parameters::join( + self.iter(), + iter.map(|(k, v)| (k.borrow(), v.borrow())), + )); self.0 = Cow::Owned(inner); } @@ -341,7 +344,9 @@ impl<'s> OrderedProperties<'s> { fn order(&mut self) { if !self.0.is_ordered() { - self.0 = Properties(Cow::Owned(Parameters::from_iter_sort(self.iter()))); + self.0 = Properties(Cow::Owned(Parameters::from_iter(Parameters::sort( + self.iter(), + )))); } } } From 0bc257dab138223769ee267d5a46b9691aec1cda Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 21:19:25 +0200 Subject: [PATCH 236/357] Fix no_std CI --- commons/zenoh-protocol/src/core/properties.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commons/zenoh-protocol/src/core/properties.rs b/commons/zenoh-protocol/src/core/properties.rs index 3c83d00e75..2fa71ec93f 100644 --- a/commons/zenoh-protocol/src/core/properties.rs +++ b/commons/zenoh-protocol/src/core/properties.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use super::parameters::{Parameters, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR}; -use alloc::borrow::Cow; +use alloc::{borrow::Cow, string::String}; use core::{borrow::Borrow, fmt}; #[cfg(feature = "std")] use std::collections::HashMap; From 2bf499db30e792ca13ece596cbfbb5ac22c102dc Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 21:40:04 +0200 Subject: [PATCH 237/357] Fix no_std CI --- commons/zenoh-protocol/src/core/properties.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/commons/zenoh-protocol/src/core/properties.rs b/commons/zenoh-protocol/src/core/properties.rs index 2fa71ec93f..67a1edba7e 100644 --- a/commons/zenoh-protocol/src/core/properties.rs +++ b/commons/zenoh-protocol/src/core/properties.rs @@ -12,7 +12,10 @@ // ZettaScale Zenoh Team, // use super::parameters::{Parameters, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR}; -use alloc::{borrow::Cow, string::String}; +use alloc::{ + borrow::Cow, + string::{String, ToString}, +}; use core::{borrow::Borrow, fmt}; #[cfg(feature = "std")] use std::collections::HashMap; From 6d531588ac58bb1daad82047d8f41aa05ded9855 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 22:31:18 +0200 Subject: [PATCH 238/357] Fix doctest --- commons/zenoh-protocol/src/core/properties.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commons/zenoh-protocol/src/core/properties.rs b/commons/zenoh-protocol/src/core/properties.rs index 67a1edba7e..a4c2c35197 100644 --- a/commons/zenoh-protocol/src/core/properties.rs +++ b/commons/zenoh-protocol/src/core/properties.rs @@ -26,7 +26,7 @@ use std::collections::HashMap; /// /// Example: /// ``` -/// use zenoh_collections::Properties; +/// use zenoh_protocol::core::Properties; /// /// let a = "a=1;b=2;c=3|4|5;d=6"; /// let p = Properties::from(a); From ce9495c80df5d245859dbff25cf2e49a9f4ebba5 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 15 Apr 2024 22:51:11 +0200 Subject: [PATCH 239/357] Fix unused import --- zenoh/src/selector.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 076b1c0875..659e6695ca 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -25,8 +25,8 @@ use zenoh_protocol::core::{ key_expr::{keyexpr, OwnedKeyExpr}, Properties, }; -use zenoh_result::ZResult; -use zenoh_util::time_range::TimeRange; +#[cfg(feature = "unstable")] +use ::{zenoh_result::ZResult, zenoh_util::time_range::TimeRange}; /// A selector is the combination of a [Key Expression](crate::prelude::KeyExpr), which defines the /// set of keys that are relevant to an operation, and a set of parameters From 099c77f486f5d63d302fb9e6d4e6b29dc2bb538b Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 16 Apr 2024 11:53:30 +0200 Subject: [PATCH 240/357] Fix docstring --- zenoh/src/selector.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 659e6695ca..fe5394ea8e 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -88,15 +88,13 @@ impl<'a> Selector<'a> { pub fn key_expr(&'a self) -> &KeyExpr<'a> { &self.key_expr } - /// Gets the parameters as a raw string. + + /// Gets a reference to selector's [`Parameters`]. pub fn parameters(&self) -> &Parameters<'a> { &self.parameters } - /// Gets a mutable reference to the parameters as a String. - /// - /// Note that calling this function may cause an allocation and copy if the selector's parameters wasn't - /// already owned by `self`. `self` owns its parameters as soon as this function returns. + /// Gets a mutable reference to selector's [`Parameters`]. pub fn parameters_mut(&mut self) -> &mut Parameters<'a> { &mut self.parameters } From 131ccdd5e5620a648f5ea1d43480d061e79ec62c Mon Sep 17 00:00:00 2001 From: DenisBiryukov91 <155981813+DenisBiryukov91@users.noreply.github.com> Date: Tue, 16 Apr 2024 11:55:08 +0200 Subject: [PATCH 241/357] make Reply fields pub(crate) and add accessors (#929) * make Reply fields pub(crate) and add accessors * doctests fix * Rename sample() and try_into_sample() to result() and into_result() * Fix valgrind CI * Fix doctest --------- Co-authored-by: Luca Cominardi --- .../src/queryable_get/bin/z_queryable_get.rs | 2 +- examples/examples/z_get.rs | 2 +- examples/examples/z_get_liveliness.rs | 2 +- plugins/zenoh-plugin-rest/src/lib.rs | 14 ++++---- .../src/replica/align_queryable.rs | 2 +- .../src/replica/aligner.rs | 2 +- .../src/replica/storage.rs | 2 +- .../tests/operations.rs | 2 +- .../tests/wildcard.rs | 2 +- zenoh-ext/src/group.rs | 2 +- zenoh-ext/src/lib.rs | 2 +- zenoh/src/lib.rs | 2 +- zenoh/src/liveliness.rs | 8 ++--- zenoh/src/query.rs | 35 +++++++++++++++---- zenoh/src/session.rs | 28 +++++++-------- zenoh/tests/attachments.rs | 2 +- zenoh/tests/events.rs | 8 ++--- zenoh/tests/liveliness.rs | 5 ++- zenoh/tests/routing.rs | 2 +- zenoh/tests/session.rs | 6 ++-- zenoh/tests/unicity.rs | 2 +- 21 files changed, 78 insertions(+), 54 deletions(-) diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs index a5111c11e3..80549ead27 100644 --- a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -60,7 +60,7 @@ async fn main() { .await .unwrap(); while let Ok(reply) = replies.recv_async().await { - match reply.sample { + match reply.result() { Ok(sample) => println!( ">> Received ('{}': '{}')", sample.key_expr().as_str(), diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 
77b67b90ed..7295294a00 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -41,7 +41,7 @@ async fn main() { .await .unwrap(); while let Ok(reply) = replies.recv_async().await { - match reply.sample { + match reply.result() { Ok(sample) => { let payload = sample .payload() diff --git a/examples/examples/z_get_liveliness.rs b/examples/examples/z_get_liveliness.rs index 0a15b287c7..49c211f322 100644 --- a/examples/examples/z_get_liveliness.rs +++ b/examples/examples/z_get_liveliness.rs @@ -36,7 +36,7 @@ async fn main() { .await .unwrap(); while let Ok(reply) = replies.recv_async().await { - match reply.sample { + match reply.result() { Ok(sample) => println!(">> Alive token ('{}')", sample.key_expr().as_str(),), Err(err) => { let payload = err diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 1068d07163..0c5f639eca 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -92,9 +92,9 @@ fn sample_to_json(sample: &Sample) -> JSONSample { } } -fn result_to_json(sample: Result) -> JSONSample { +fn result_to_json(sample: Result<&Sample, &Value>) -> JSONSample { match sample { - Ok(sample) => sample_to_json(&sample), + Ok(sample) => sample_to_json(sample), Err(err) => JSONSample { key: "ERROR".into(), value: payload_to_json(err.payload(), err.encoding()), @@ -107,7 +107,7 @@ fn result_to_json(sample: Result) -> JSONSample { async fn to_json(results: flume::Receiver) -> String { let values = results .stream() - .filter_map(move |reply| async move { Some(result_to_json(reply.sample)) }) + .filter_map(move |reply| async move { Some(result_to_json(reply.result())) }) .collect::>() .await; @@ -122,7 +122,7 @@ async fn to_json_response(results: flume::Receiver) -> Response { ) } -fn sample_to_html(sample: Sample) -> String { +fn sample_to_html(sample: &Sample) -> String { format!( "
<dt>{}</dt>\n<dd>{}</dd>
\n", sample.key_expr().as_str(), @@ -133,7 +133,7 @@ fn sample_to_html(sample: Sample) -> String { ) } -fn result_to_html(sample: Result) -> String { +fn result_to_html(sample: Result<&Sample, &Value>) -> String { match sample { Ok(sample) => sample_to_html(sample), Err(err) => { @@ -148,7 +148,7 @@ fn result_to_html(sample: Result) -> String { async fn to_html(results: flume::Receiver) -> String { let values = results .stream() - .filter_map(move |reply| async move { Some(result_to_html(reply.sample)) }) + .filter_map(move |reply| async move { Some(result_to_html(reply.result())) }) .collect::>() .await .join("\n"); @@ -161,7 +161,7 @@ async fn to_html_response(results: flume::Receiver) -> Response { async fn to_raw_response(results: flume::Receiver) -> Response { match results.recv_async().await { - Ok(reply) => match reply.sample { + Ok(reply) => match reply.result() { Ok(sample) => response( StatusCode::Ok, Cow::from(sample.encoding()).as_ref(), diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 3a37095f67..bc98f61009 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -231,7 +231,7 @@ impl AlignQueryable { // get corresponding key from log let replies = self.session.get(&logentry.key).res().await.unwrap(); if let Ok(reply) = replies.recv_async().await { - match reply.sample { + match reply.into_result() { Ok(sample) => { log::trace!( "[ALIGN QUERYABLE] Received ('{}': '{}')", diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index f33b370200..8b8fe6753a 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -332,7 +332,7 @@ impl Aligner { { Ok(replies) => { while let Ok(reply) = replies.recv_async().await { - match reply.sample { + match reply.into_result() { Ok(sample) => { log::trace!( "[ALIGNER] Received ('{}': '{}')", diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index d2c2984c21..0c49852cfa 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -649,7 +649,7 @@ impl StorageService { } }; while let Ok(reply) = replies.recv_async().await { - match reply.sample { + match reply.into_result() { Ok(sample) => { self.process_sample(sample).await; } diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index 36162f01c2..c82459cdcc 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -50,7 +50,7 @@ async fn get_data(session: &zenoh::Session, key_expr: &str) -> Vec { println!("Getting replies on '{key_expr}': '{replies:?}'..."); let mut samples = Vec::new(); for reply in replies { - if let Ok(sample) = reply.sample { + if let Ok(sample) = reply.into_result() { samples.push(sample); } } diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index 5a71dc23f0..d778eadde4 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -51,7 +51,7 @@ 
async fn get_data(session: &zenoh::Session, key_expr: &str) -> Vec { println!("Getting replies on '{key_expr}': '{replies:?}'..."); let mut samples = Vec::new(); for reply in replies { - if let Ok(sample) = reply.sample { + if let Ok(sample) = reply.into_result() { samples.push(sample); } } diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 8a7823ed72..bd36652850 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -306,7 +306,7 @@ async fn net_event_handler(z: Arc, state: Arc) { let receiver = z.get(&qres).consolidation(qc).res().await.unwrap(); while let Ok(reply) = receiver.recv_async().await { - match reply.sample { + match reply.result() { Ok(sample) => { match bincode::deserialize_from::( sample.payload().reader(), diff --git a/zenoh-ext/src/lib.rs b/zenoh-ext/src/lib.rs index 7ac880fd8c..80d01d7846 100644 --- a/zenoh-ext/src/lib.rs +++ b/zenoh-ext/src/lib.rs @@ -61,6 +61,6 @@ pub trait ExtractSample { impl ExtractSample for Reply { fn extract(self) -> ZResult { - self.sample.map_err(|e| zerror!("{:?}", e).into()) + self.into_result().map_err(|e| zerror!("{:?}", e).into()) } } diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 90b4b2af58..8de143fd8d 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -70,7 +70,7 @@ //! let session = zenoh::open(config::default()).res().await.unwrap(); //! let replies = session.get("key/expression").res().await.unwrap(); //! while let Ok(reply) = replies.recv_async().await { -//! println!(">> Received {:?}", reply.sample); +//! println!(">> Received {:?}", reply.result()); //! } //! } //! ``` diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index 0b539ba636..e55b0a90dc 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -174,7 +174,7 @@ impl<'a> Liveliness<'a> { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let replies = session.liveliness().get("key/expression").res().await.unwrap(); /// while let Ok(reply) = replies.recv_async().await { - /// if let Ok(sample) = reply.sample { + /// if let Ok(sample) = reply.result() { /// println!(">> Liveliness token {}", sample.key_expr()); /// } /// } @@ -606,7 +606,7 @@ where /// .await /// .unwrap(); /// while let Ok(token) = tokens.recv_async().await { -/// match token.sample { +/// match token.result() { /// Ok(sample) => println!("Alive token ('{}')", sample.key_expr().as_str()), /// Err(err) => println!("Received (ERROR: '{:?}')", err.payload()), /// } @@ -635,7 +635,7 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { /// let queryable = session /// .liveliness() /// .get("key/expression") - /// .callback(|reply| {println!("Received {:?}", reply.sample);}) + /// .callback(|reply| { println!("Received {:?}", reply.result()); }) /// .res() /// .await /// .unwrap(); @@ -710,7 +710,7 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { /// .await /// .unwrap(); /// while let Ok(reply) = replies.recv_async().await { - /// println!("Received {:?}", reply.sample); + /// println!("Received {:?}", reply.result()); /// } /// # } /// ``` diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index d089290326..901dcd18ae 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -77,10 +77,31 @@ impl Default for QueryConsolidation { #[non_exhaustive] #[derive(Clone, Debug)] pub struct Reply { - /// The result of this Reply. - pub sample: Result, - /// The id of the zenoh instance that answered this Reply. 
- pub replier_id: ZenohId, + pub(crate) result: Result, + pub(crate) replier_id: ZenohId, +} + +impl Reply { + /// Gets the a borrowed result of this `Reply`. Use [`Reply::into_result`] to take ownership of the result. + pub fn result(&self) -> Result<&Sample, &Value> { + self.result.as_ref() + } + + /// Converts this `Reply` into the its result. Use [`Reply::result`] it you don't want to take ownership. + pub fn into_result(self) -> Result { + self.result + } + + /// Gets the id of the zenoh instance that answered this Reply. + pub fn replier_id(&self) -> ZenohId { + self.replier_id + } +} + +impl From for Result { + fn from(value: Reply) -> Self { + value.into_result() + } } pub(crate) struct QueryState { @@ -110,7 +131,7 @@ pub(crate) struct QueryState { /// .await /// .unwrap(); /// while let Ok(reply) = replies.recv_async().await { -/// println!("Received {:?}", reply.sample) +/// println!("Received {:?}", reply.result()) /// } /// # } /// ``` @@ -209,7 +230,7 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let queryable = session /// .get("key/expression") - /// .callback(|reply| {println!("Received {:?}", reply.sample);}) + /// .callback(|reply| {println!("Received {:?}", reply.result());}) /// .res() /// .await /// .unwrap(); @@ -302,7 +323,7 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { /// .await /// .unwrap(); /// while let Ok(reply) = replies.recv_async().await { - /// println!("Received {:?}", reply.sample); + /// println!("Received {:?}", reply.result()); /// } /// # } /// ``` diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 29ad9c2b00..c73b791a96 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -784,7 +784,7 @@ impl Session { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let replies = session.get("key/expression").res().await.unwrap(); /// while let Ok(reply) = replies.recv_async().await { - /// println!(">> Received {:?}", reply.sample); + /// println!(">> Received {:?}", reply.result()); /// } /// # } /// ``` @@ -1653,7 +1653,7 @@ impl Session { } } (query.callback)(Reply { - sample: Err("Timeout".into()), + result: Err("Timeout".into()), replier_id: zid, }); } @@ -2171,7 +2171,7 @@ impl Primitives for Session { }; let new_reply = Reply { replier_id, - sample: Err(value), + result: Err(value), }; callback(new_reply); } @@ -2292,7 +2292,7 @@ impl Primitives for Session { attachment, ); let new_reply = Reply { - sample: Ok(sample), + result: Ok(sample), replier_id: ZenohId::rand(), // TODO }; let callback = @@ -2302,15 +2302,15 @@ impl Primitives for Session { } ConsolidationMode::Monotonic => { match query.replies.as_ref().unwrap().get( - new_reply.sample.as_ref().unwrap().key_expr.as_keyexpr(), + new_reply.result.as_ref().unwrap().key_expr.as_keyexpr(), ) { Some(reply) => { - if new_reply.sample.as_ref().unwrap().timestamp - > reply.sample.as_ref().unwrap().timestamp + if new_reply.result.as_ref().unwrap().timestamp + > reply.result.as_ref().unwrap().timestamp { query.replies.as_mut().unwrap().insert( new_reply - .sample + .result .as_ref() .unwrap() .key_expr @@ -2326,7 +2326,7 @@ impl Primitives for Session { None => { query.replies.as_mut().unwrap().insert( new_reply - .sample + .result .as_ref() .unwrap() .key_expr @@ -2340,15 +2340,15 @@ impl Primitives for Session { } Consolidation::Auto | ConsolidationMode::Latest => { match query.replies.as_ref().unwrap().get( - new_reply.sample.as_ref().unwrap().key_expr.as_keyexpr(), + 
new_reply.result.as_ref().unwrap().key_expr.as_keyexpr(), ) { Some(reply) => { - if new_reply.sample.as_ref().unwrap().timestamp - > reply.sample.as_ref().unwrap().timestamp + if new_reply.result.as_ref().unwrap().timestamp + > reply.result.as_ref().unwrap().timestamp { query.replies.as_mut().unwrap().insert( new_reply - .sample + .result .as_ref() .unwrap() .key_expr @@ -2361,7 +2361,7 @@ impl Primitives for Session { None => { query.replies.as_mut().unwrap().insert( new_reply - .sample + .result .as_ref() .unwrap() .key_expr diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 844e2985bc..df9ebcca2e 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -113,7 +113,7 @@ fn attachment_queries() { .res() .unwrap(); while let Ok(reply) = get.recv() { - let response = reply.sample.as_ref().unwrap(); + let response = reply.result().unwrap(); for (k, v) in response.attachment().unwrap().iter::<( [u8; std::mem::size_of::()], [u8; std::mem::size_of::()], diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index 201f4941f9..9c807bd121 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -75,8 +75,8 @@ async fn zenoh_events() { .into_iter() .collect(); assert!(replies.len() == 1); - assert!(replies[0].sample.is_ok()); - let key_expr = replies[0].sample.as_ref().unwrap().key_expr().as_str(); + assert!(replies[0].result().is_ok()); + let key_expr = replies[0].result().unwrap().key_expr().as_str(); assert!(key_expr.eq(&format!("@/session/{zid}/transport/unicast/{zid2}"))); let replies: Vec = ztimeout!(session @@ -86,8 +86,8 @@ async fn zenoh_events() { .into_iter() .collect(); assert!(replies.len() == 1); - assert!(replies[0].sample.is_ok()); - let key_expr = replies[0].sample.as_ref().unwrap().key_expr().as_str(); + assert!(replies[0].result().is_ok()); + let key_expr = replies[0].result().unwrap().key_expr().as_str(); assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/"))); close_session(session2).await; diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index 0e2870d808..fe6ac99571 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -53,7 +53,10 @@ async fn zenoh_liveliness() { .get("zenoh_liveliness_test") .res_async()) .unwrap(); - let sample = ztimeout!(replies.recv_async()).unwrap().sample.unwrap(); + let sample: Sample = ztimeout!(replies.recv_async()) + .unwrap() + .into_result() + .unwrap(); assert!(sample.kind() == SampleKind::Put); assert!(sample.key_expr().as_str() == "zenoh_liveliness_test"); diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 56bacd7fdd..1323dc4b08 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -100,7 +100,7 @@ impl Task { replies = session.get(ke).timeout(Duration::from_secs(10)).res() => { let replies = replies?; while let Ok(reply) = replies.recv_async().await { - match reply.sample { + match reply.result() { Ok(sample) => { let recv_size = sample.payload().len(); if recv_size != *expected_size { diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 603ebdac49..ca67c450fd 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -193,7 +193,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re let selector = format!("{}?ok_put", key_expr); let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { - let s = s.sample.unwrap(); + let s = s.result().unwrap(); assert_eq!(s.kind(), SampleKind::Put); 
assert_eq!(s.payload().len(), size); cnt += 1; @@ -211,7 +211,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re let selector = format!("{}?ok_del", key_expr); let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { - let s = s.sample.unwrap(); + let s = s.result().unwrap(); assert_eq!(s.kind(), SampleKind::Delete); assert_eq!(s.payload().len(), 0); cnt += 1; @@ -229,7 +229,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re let selector = format!("{}?err", key_expr); let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { - let e = s.sample.unwrap_err(); + let e = s.result().unwrap_err(); assert_eq!(e.payload().len(), size); cnt += 1; } diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index f34704fb7e..78eded580c 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -234,7 +234,7 @@ async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) { for _ in 0..msg_count { let rs = ztimeout!(s03.get(cke.clone()).res_async()).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { - assert_eq!(s.sample.unwrap().payload().len(), size); + assert_eq!(s.result().unwrap().payload().len(), size); cnt += 1; } } From 9d25e9838c4d35dcdd98fa3c9b9cf09a7c27e819 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 16 Apr 2024 11:55:28 +0200 Subject: [PATCH 242/357] Remove accept_any_reply from selector --- zenoh/src/query.rs | 3 ++- zenoh/src/selector.rs | 7 +------ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 2a94620237..14a30e983a 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -387,7 +387,8 @@ impl<'a, 'b, Handler> GetBuilder<'a, 'b, Handler> { pub fn accept_replies(self, accept: ReplyKeyExpr) -> Self { Self { selector: self.selector.map(|mut s| { - s.set_accept_any_keyexpr(accept == ReplyKeyExpr::Any); + s.parameters_mut() + .set_accept_any_keyexpr(accept == ReplyKeyExpr::Any); s }), ..self diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index fe5394ea8e..606e2022cd 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -135,11 +135,6 @@ impl<'a> Selector<'a> { self.parameters().time_range() } - #[cfg(any(feature = "unstable", test))] - pub(crate) fn set_accept_any_keyexpr>>(&mut self, anyke: T) { - self.parameters_mut().set_accept_any_keyexpr(anyke); - } - #[cfg(any(feature = "unstable", test))] pub(crate) fn accept_any_keyexpr(&self) -> ZResult> { self.parameters().accept_any_keyexpr() @@ -411,7 +406,7 @@ fn selector_accessors() { selector.parameters_mut().extend_from_iter(hm.iter()); assert_eq!(selector.parameters().get("_filter").unwrap(), ""); - selector.set_accept_any_keyexpr(true); + selector.parameters_mut().set_accept_any_keyexpr(true); println!("Parameters end: {}", selector.parameters()); for i in selector.parameters().iter() { From c8b3345e898d726ff8b3f151ae87c8595642d000 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 16 Apr 2024 11:59:20 +0200 Subject: [PATCH 243/357] Remove accept_any_reply from parameters --- zenoh/src/query.rs | 5 +++-- zenoh/src/selector.rs | 21 +++------------------ 2 files changed, 6 insertions(+), 20 deletions(-) diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 14a30e983a..47ae8faef4 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -387,8 +387,9 @@ impl<'a, 'b, Handler> GetBuilder<'a, 'b, Handler> { pub fn 
accept_replies(self, accept: ReplyKeyExpr) -> Self { Self { selector: self.selector.map(|mut s| { - s.parameters_mut() - .set_accept_any_keyexpr(accept == ReplyKeyExpr::Any); + if accept == ReplyKeyExpr::Any { + s.parameters_mut().insert(_REPLY_KEY_EXPR_ANY_SEL_PARAM, ""); + } s }), ..self diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 606e2022cd..bad41b704a 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -228,23 +228,6 @@ impl Parameters<'_> { } } - #[cfg(any(feature = "unstable", test))] - pub(crate) fn set_accept_any_keyexpr>>(&mut self, anyke: T) { - use crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM as ANYKE; - - let mut anyke: Option = anyke.into(); - match anyke.take() { - Some(ak) => { - if ak { - self.0.insert(ANYKE, "") - } else { - self.0.insert(ANYKE, "false") - } - } - None => self.0.remove(ANYKE), - }; - } - #[cfg(any(feature = "unstable", test))] pub(crate) fn accept_any_keyexpr(&self) -> ZResult> { use crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM as ANYKE; @@ -374,6 +357,8 @@ impl<'a> From> for Selector<'a> { #[test] fn selector_accessors() { + use crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM as ANYKE; + let time_range = "[now(-2s)..now(2s)]".parse().unwrap(); for selector in [ "hello/there?_timetrick", @@ -406,7 +391,7 @@ fn selector_accessors() { selector.parameters_mut().extend_from_iter(hm.iter()); assert_eq!(selector.parameters().get("_filter").unwrap(), ""); - selector.parameters_mut().set_accept_any_keyexpr(true); + selector.parameters_mut().insert(ANYKE, ""); println!("Parameters end: {}", selector.parameters()); for i in selector.parameters().iter() { From f70143ce64491ef9fc9407bad04659436a9d78a8 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 16 Apr 2024 12:02:43 +0200 Subject: [PATCH 244/357] Remove accept_any_keyexpr from selector and parameters --- zenoh/src/queryable.rs | 6 +++++- zenoh/src/selector.rs | 15 --------------- zenoh/src/session.rs | 5 ++++- 3 files changed, 9 insertions(+), 17 deletions(-) diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 3f88c6dcd8..000a84d54d 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -222,7 +222,11 @@ impl Query { } #[cfg(feature = "unstable")] fn _accepts_any_replies(&self) -> ZResult { - Ok(self.parameters().accept_any_keyexpr()?.unwrap_or(false)) + use crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM; + + Ok(self + .parameters() + .contains_key(_REPLY_KEY_EXPR_ANY_SEL_PARAM)) } } diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index bad41b704a..4a26d4ae63 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -134,11 +134,6 @@ impl<'a> Selector<'a> { pub fn time_range(&self) -> ZResult> { self.parameters().time_range() } - - #[cfg(any(feature = "unstable", test))] - pub(crate) fn accept_any_keyexpr(&self) -> ZResult> { - self.parameters().accept_any_keyexpr() - } } /// A wrapper type to help decode zenoh selector parameters. 
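// Illustration (a minimal sketch, not taken from the patch itself): after dropping the dedicated
// accessors, "accept any key expression" is expressed as an ordinary selector parameter. Here
// `selector` is assumed to be a `Selector` already in scope; the constant and methods are the
// ones appearing in the diffs above.
use crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM as ANYKE;

selector.parameters_mut().insert(ANYKE, "");
assert!(selector.parameters().contains_key(ANYKE));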
@@ -227,16 +222,6 @@ impl Parameters<'_> { None => Ok(None), } } - - #[cfg(any(feature = "unstable", test))] - pub(crate) fn accept_any_keyexpr(&self) -> ZResult> { - use crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM as ANYKE; - - match self.0.get(ANYKE) { - Some(ak) => Ok(Some(ak.parse()?)), - None => Ok(None), - } - } } impl std::fmt::Debug for Selector<'_> { diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index ef1d56ac32..e01cbb2364 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -2189,7 +2189,10 @@ impl Primitives for Session { Some(query) => { let c = zcondfeat!( "unstable", - !matches!(query.selector.accept_any_keyexpr(), Ok(Some(true))), + !query + .selector + .parameters() + .contains_key(_REPLY_KEY_EXPR_ANY_SEL_PARAM), true ); if c && !query.selector.key_expr.intersects(&key_expr) { From 9a43b8322fe1a9f2f1ad5c91555c917ebe7ae649 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 16 Apr 2024 12:05:36 +0200 Subject: [PATCH 245/357] Remove time_range from selector --- zenoh-ext/src/publication_cache.rs | 4 ++-- zenoh/src/selector.rs | 23 ++++++----------------- 2 files changed, 8 insertions(+), 19 deletions(-) diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index bbc90c0e8f..d11ef90537 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -208,7 +208,7 @@ impl<'a> PublicationCache<'a> { if !query.selector().key_expr().as_str().contains('*') { if let Some(queue) = cache.get(query.selector().key_expr().as_keyexpr()) { for sample in queue { - if let (Ok(Some(time_range)), Some(timestamp)) = (query.selector().time_range(), sample.timestamp()) { + if let (Ok(Some(time_range)), Some(timestamp)) = (query.parameters().time_range(), sample.timestamp()) { if !time_range.contains(timestamp.get_time().to_system_time()){ continue; } @@ -222,7 +222,7 @@ impl<'a> PublicationCache<'a> { for (key_expr, queue) in cache.iter() { if query.selector().key_expr().intersects(unsafe{ keyexpr::from_str_unchecked(key_expr) }) { for sample in queue { - if let (Ok(Some(time_range)), Some(timestamp)) = (query.selector().time_range(), sample.timestamp()) { + if let (Ok(Some(time_range)), Some(timestamp)) = (query.parameters().time_range(), sample.timestamp()) { if !time_range.contains(timestamp.get_time().to_system_time()){ continue; } diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 4a26d4ae63..21be85b49e 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -120,20 +120,6 @@ impl<'a> Selector<'a> { pub fn split(self) -> (KeyExpr<'a>, Parameters<'a>) { (self.key_expr, self.parameters) } - - #[zenoh_macros::unstable] - /// Sets the time range targeted by the selector. - pub fn set_time_range>>(&mut self, time_range: T) { - self.parameters_mut().set_time_range(time_range); - } - - #[zenoh_macros::unstable] - /// Extracts the standardized `_time` argument from the selector parameters. - /// - /// The default implementation still causes a complete pass through the selector parameters to ensure that there are no duplicates of the `_time` key. - pub fn time_range(&self) -> ZResult> { - self.parameters().time_range() - } } /// A wrapper type to help decode zenoh selector parameters. @@ -216,7 +202,7 @@ impl Parameters<'_> { /// Extracts the standardized `_time` argument from the selector parameters. /// /// The default implementation still causes a complete pass through the selector parameters to ensure that there are no duplicates of the `_time` key. 
- fn time_range(&self) -> ZResult> { + pub fn time_range(&self) -> ZResult> { match self.0.get(TIME_RANGE_KEY) { Some(tr) => Ok(Some(tr.parse()?)), None => Ok(None), @@ -360,8 +346,11 @@ fn selector_accessors() { assert_eq!(selector.parameters().get("_timetrick").unwrap(), ""); - selector.set_time_range(time_range); - assert_eq!(selector.time_range().unwrap().unwrap(), time_range); + selector.parameters_mut().set_time_range(time_range); + assert_eq!( + selector.parameters().time_range().unwrap().unwrap(), + time_range + ); assert!(selector.parameters().contains_key(TIME_RANGE_KEY)); let hm: HashMap<&str, &str> = HashMap::from(selector.parameters()); From 3ffded929820f6d28d1a1fb4afbfd092a86d0899 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 16 Apr 2024 12:16:01 +0200 Subject: [PATCH 246/357] Improve docs --- zenoh/src/selector.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 21be85b49e..15ce36faa8 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -99,7 +99,7 @@ impl<'a> Selector<'a> { &mut self.parameters } - /// Sets the `parameters` part of this `Selector`. + /// Sets the parameters of this selector. This operation completly overwrites existing [`Parameters`]. #[inline(always)] pub fn set_parameters
<P>
(&mut self, parameters: P) where From 1bb65cc847729a9368cd50926c505738fbc00265 Mon Sep 17 00:00:00 2001 From: DenisBiryukov91 <155981813+DenisBiryukov91@users.noreply.github.com> Date: Tue, 16 Apr 2024 13:49:27 +0200 Subject: [PATCH 247/357] make receiver fields pub(crate) in Subscriber, Queryable, Scout and MatchingListener (#930) * make receiver fields pub(crate) in Subscriber, Quryable, Scout and MatchingListener * Add handler() and handler_mut() to Subscriber and Queryable * Fix conflict generic names --------- Co-authored-by: Luca Cominardi --- .../zenoh-plugin-rest/examples/z_serve_sse.rs | 2 +- zenoh-ext/src/publication_cache.rs | 6 ++-- zenoh-ext/src/querying_subscriber.rs | 24 ++++++------- zenoh-ext/src/subscriber_ext.rs | 2 +- zenoh/src/handlers/mod.rs | 2 +- zenoh/src/liveliness.rs | 4 +-- zenoh/src/publication.rs | 2 +- zenoh/src/queryable.rs | 36 ++++++++++++++----- zenoh/src/scouting.rs | 2 +- zenoh/src/subscriber.rs | 32 ++++++++++++----- 10 files changed, 73 insertions(+), 39 deletions(-) diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index bb76005d6e..ac3e7bacfe 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -46,7 +46,7 @@ async fn main() { let queryable = session.declare_queryable(key).res().await.unwrap(); async_std::task::spawn({ - let receiver = queryable.receiver.clone(); + let receiver = queryable.handler().clone(); async move { while let Ok(request) = receiver.recv_async().await { request.reply(key, HTML).res().await.unwrap(); diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index d11ef90537..71b6e6b26b 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -160,8 +160,8 @@ impl<'a> PublicationCache<'a> { let queryable = queryable.res_sync()?; // take local ownership of stuff to be moved into task - let sub_recv = local_sub.receiver.clone(); - let quer_recv = queryable.receiver.clone(); + let sub_recv = local_sub.handler().clone(); + let quer_recv = queryable.handler().clone(); let pub_key_expr = key_expr.into_owned(); let resources_limit = conf.resources_limit; let history = conf.history; @@ -202,7 +202,7 @@ impl<'a> PublicationCache<'a> { } }, - // on query, reply with cach content + // on query, reply with cache content query = quer_recv.recv_async() => { if let Ok(query) = query { if !query.selector().key_expr().as_str().contains('*') { diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index d749a94ed9..090cc88ff0 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -614,38 +614,38 @@ where /// } /// # } /// ``` -pub struct FetchingSubscriber<'a, Receiver> { +pub struct FetchingSubscriber<'a, Handler> { subscriber: Subscriber<'a, ()>, callback: Arc, state: Arc>, - receiver: Receiver, + handler: Handler, } -impl std::ops::Deref for FetchingSubscriber<'_, Receiver> { - type Target = Receiver; +impl std::ops::Deref for FetchingSubscriber<'_, Handler> { + type Target = Handler; fn deref(&self) -> &Self::Target { - &self.receiver + &self.handler } } -impl std::ops::DerefMut for FetchingSubscriber<'_, Receiver> { +impl std::ops::DerefMut for FetchingSubscriber<'_, Handler> { fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.receiver + &mut self.handler } } -impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { +impl<'a, Handler> FetchingSubscriber<'a, Handler> { fn 
new< KeySpace, - Handler, + InputHandler, Fetch: FnOnce(Box) -> ZResult<()> + Send + Sync, TryIntoSample, >( - conf: FetchingSubscriberBuilder<'a, 'a, KeySpace, Handler, Fetch, TryIntoSample>, + conf: FetchingSubscriberBuilder<'a, 'a, KeySpace, InputHandler, Fetch, TryIntoSample>, ) -> ZResult where KeySpace: Into, - Handler: IntoHandler<'static, Sample, Handler = Receiver> + Send, + InputHandler: IntoHandler<'static, Sample, Handler = Handler> + Send, TryIntoSample: ExtractSample + Send + Sync, { let state = Arc::new(Mutex::new(InnerState { @@ -698,7 +698,7 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { subscriber, callback, state, - receiver, + handler: receiver, }; // run fetch diff --git a/zenoh-ext/src/subscriber_ext.rs b/zenoh-ext/src/subscriber_ext.rs index 7d77fac05b..4b6346bcf8 100644 --- a/zenoh-ext/src/subscriber_ext.rs +++ b/zenoh-ext/src/subscriber_ext.rs @@ -38,7 +38,7 @@ where { type Output = Forward, fn(Sample) -> Result>, S>; fn forward(&'a mut self, sink: S) -> Self::Output { - futures::StreamExt::forward(futures::StreamExt::map(self.receiver.stream(), Ok), sink) + futures::StreamExt::forward(futures::StreamExt::map(self.stream(), Ok), sink) } } diff --git a/zenoh/src/handlers/mod.rs b/zenoh/src/handlers/mod.rs index 627c166795..2abd5b134a 100644 --- a/zenoh/src/handlers/mod.rs +++ b/zenoh/src/handlers/mod.rs @@ -26,7 +26,7 @@ use crate::API_DATA_RECEPTION_CHANNEL_SIZE; /// An alias for `Arc`. pub type Dyn = std::sync::Arc; -/// A type that can be converted into a [`Callback`]-handler pair. +/// A type that can be converted into a [`Callback`]-Handler pair. /// /// When Zenoh functions accept types that implement these, it intends to use the [`Callback`] as just that, /// while granting you access to the handler through the returned value via [`std::ops::Deref`] and [`std::ops::DerefMut`]. 
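// Illustration (a minimal sketch, not taken from the patch itself): with `receiver` made
// pub(crate), callers go through the new accessor (or rely on `Deref` to the handler).
// `queryable` is assumed to be declared with the default flume handler, inside an async task,
// as in the z_serve_sse change above.
let receiver = queryable.handler().clone(); // previously: queryable.receiver.clone()
while let Ok(query) = receiver.recv_async().await {
    // handle the query ...
}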
diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index e55b0a90dc..33022debbe 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -554,7 +554,7 @@ where fn res_sync(self) -> ::To { let key_expr = self.key_expr?; let session = self.session; - let (callback, receiver) = self.handler.into_handler(); + let (callback, handler) = self.handler.into_handler(); session .declare_subscriber_inner( &key_expr, @@ -569,7 +569,7 @@ where state: sub_state, alive: true, }, - receiver, + handler, }) } } diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index e3d43993f3..afe61cb3c4 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -1402,7 +1402,7 @@ impl<'a> Undeclarable<(), MatchingListenerUndeclaration<'a>> for MatchingListene #[zenoh_macros::unstable] pub struct MatchingListener<'a, Receiver> { pub(crate) listener: MatchingListenerInner<'a>, - pub receiver: Receiver, + pub(crate) receiver: Receiver, } #[zenoh_macros::unstable] diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 000a84d54d..755e0364af 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -32,7 +32,7 @@ use crate::{ }; use std::fmt; use std::future::Ready; -use std::ops::Deref; +use std::ops::{Deref, DerefMut}; use std::sync::Arc; use uhlc::Timestamp; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; @@ -844,12 +844,12 @@ impl<'a, 'b, Handler> QueryableBuilder<'a, 'b, Handler> { /// ``` #[non_exhaustive] #[derive(Debug)] -pub struct Queryable<'a, Receiver> { +pub struct Queryable<'a, Handler> { pub(crate) queryable: CallbackQueryable<'a>, - pub receiver: Receiver, + pub(crate) handler: Handler, } -impl<'a, Receiver> Queryable<'a, Receiver> { +impl<'a, Handler> Queryable<'a, Handler> { /// Returns the [`EntityGlobalId`] of this Queryable. /// /// # Examples @@ -874,6 +874,20 @@ impl<'a, Receiver> Queryable<'a, Receiver> { } } + /// Returns a reference to this queryable's handler. + /// An handler is anything that implements [`IntoHandler`]. + /// The default handler is [`DefaultHandler`]. + pub fn handler(&self) -> &Handler { + &self.handler + } + + /// Returns a mutable reference to this queryable's handler. + /// An handler is anything that implements [`IntoHandler`]. + /// The default handler is [`DefaultHandler`]. 
+ pub fn handler_mut(&mut self) -> &mut Handler { + &mut self.handler + } + #[inline] pub fn undeclare(self) -> impl Resolve> + 'a { Undeclarable::undeclare_inner(self, ()) @@ -886,11 +900,17 @@ impl<'a, T> Undeclarable<(), QueryableUndeclaration<'a>> for Queryable<'a, T> { } } -impl Deref for Queryable<'_, Receiver> { - type Target = Receiver; +impl Deref for Queryable<'_, Handler> { + type Target = Handler; fn deref(&self) -> &Self::Target { - &self.receiver + self.handler() + } +} + +impl DerefMut for Queryable<'_, Handler> { + fn deref_mut(&mut self) -> &mut Self::Target { + self.handler_mut() } } @@ -923,7 +943,7 @@ where state: qable_state, alive: true, }, - receiver, + handler: receiver, }) } } diff --git a/zenoh/src/scouting.rs b/zenoh/src/scouting.rs index 41d0401d56..072c8dee8b 100644 --- a/zenoh/src/scouting.rs +++ b/zenoh/src/scouting.rs @@ -269,7 +269,7 @@ impl fmt::Debug for ScoutInner { #[derive(Debug)] pub struct Scout { pub(crate) scout: ScoutInner, - pub receiver: Receiver, + pub(crate) receiver: Receiver, } impl Deref for Scout { diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs index 47d41ebb1f..ded2a1acdc 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -394,7 +394,7 @@ where state: sub_state, alive: true, }, - receiver, + handler: receiver, }) } } @@ -440,12 +440,12 @@ where /// ``` #[non_exhaustive] #[derive(Debug)] -pub struct Subscriber<'a, Receiver> { +pub struct Subscriber<'a, Handler> { pub(crate) subscriber: SubscriberInner<'a>, - pub receiver: Receiver, + pub(crate) handler: Handler, } -impl<'a, Receiver> Subscriber<'a, Receiver> { +impl<'a, Handler> Subscriber<'a, Handler> { /// Returns the [`EntityGlobalId`] of this Subscriber. /// /// # Examples @@ -475,6 +475,20 @@ impl<'a, Receiver> Subscriber<'a, Receiver> { &self.subscriber.state.key_expr } + /// Returns a reference to this subscriber's handler. + /// An handler is anything that implements [`IntoHandler`]. + /// The default handler is [`DefaultHandler`]. + pub fn handler(&self) -> &Handler { + &self.handler + } + + /// Returns a mutable reference to this subscriber's handler. + /// An handler is anything that implements [`IntoHandler`]. + /// The default handler is [`DefaultHandler`]. + pub fn handler_mut(&mut self) -> &mut Handler { + &mut self.handler + } + /// Close a [`Subscriber`]. 
/// /// Subscribers are automatically closed when dropped, but you may want to use this function to handle errors or @@ -506,16 +520,16 @@ impl<'a, T> Undeclarable<(), SubscriberUndeclaration<'a>> for Subscriber<'a, T> } } -impl Deref for Subscriber<'_, Receiver> { - type Target = Receiver; +impl Deref for Subscriber<'_, Handler> { + type Target = Handler; fn deref(&self) -> &Self::Target { - &self.receiver + self.handler() } } -impl DerefMut for Subscriber<'_, Receiver> { +impl DerefMut for Subscriber<'_, Handler> { fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.receiver + self.handler_mut() } } From a0db071832ba0bbf2cf0ebac7e501a97785c58ee Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 16 Apr 2024 14:50:13 +0200 Subject: [PATCH 248/357] compilation fixes --- Cargo.lock | 3 +-- commons/zenoh-codec/src/network/interest.rs | 4 ++-- examples/Cargo.toml | 1 - plugins/zenoh-plugin-example/src/lib.rs | 1 - .../src/replica/aligner.rs | 6 +++--- .../src/replica/storage.rs | 8 ++++---- zenoh/src/admin.rs | 4 ++-- zenoh/src/handlers/callback.rs | 2 +- zenoh/src/handlers/fifo.rs | 2 +- zenoh/src/handlers/ring.rs | 2 +- zenoh/src/net/primitives/mux.rs | 6 +++--- zenoh/src/net/routing/dispatcher/pubsub.rs | 6 +++--- zenoh/src/net/routing/dispatcher/queries.rs | 4 ++-- zenoh/src/sample/mod.rs | 2 +- zenoh/src/session.rs | 10 ---------- 15 files changed, 24 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0e6a795b6b..754177e657 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2334,7 +2334,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -5113,7 +5113,6 @@ dependencies = [ "clap", "flume", "futures", - "log", "phf", "serde", "serde_cbor", diff --git a/commons/zenoh-codec/src/network/interest.rs b/commons/zenoh-codec/src/network/interest.rs index 9d1e64de76..852e106f98 100644 --- a/commons/zenoh-codec/src/network/interest.rs +++ b/commons/zenoh-codec/src/network/interest.rs @@ -24,8 +24,8 @@ use zenoh_protocol::{ core::WireExpr, network::{ declare, id, - interest::{self, Interest, InterestMode, InterestOptions}, - Mapping, + interest::{self, InterestMode, InterestOptions}, + Interest, Mapping, }, }; diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 968ddcd99d..2027133a1e 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -48,7 +48,6 @@ flume = { workspace = true } futures = { workspace = true } git-version = { workspace = true } json5 = { workspace = true } -log = { workspace = true } zenoh-collections = { workspace = true } tracing = { workspace = true } zenoh = { workspace = true, default-features = true } diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 35ce3f6e8f..c2112e28b2 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -14,7 +14,6 @@ #![recursion_limit = "256"] use futures::select; -use log::{debug, info}; use std::borrow::Cow; use std::collections::HashMap; use std::convert::TryFrom; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index a845f7065a..68387b6596 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -110,7 +110,7 @@ impl Aligner { .encoding(value.encoding().clone()) .timestamp(ts) .into(); - log::debug!("[ALIGNER] Adding 
{:?} to storage", sample); + tracing::debug!("[ALIGNER] Adding {:?} to storage", sample); self.tx_sample.send_async(sample).await.unwrap_or_else(|e| { tracing::error!("[ALIGNER] Error adding sample to storage: {}", e) }); @@ -343,7 +343,7 @@ impl Aligner { } Err(err) => { tracing::error!( - "[ALIGNER] Received error for query on selector {} :{}", + "[ALIGNER] Received error for query on selector {} :{:?}", selector, err ); @@ -353,7 +353,7 @@ impl Aligner { } } Err(err) => { - tracing::error!("[ALIGNER] Query failed on selector `{}`: {}", selector, err); + tracing::error!("[ALIGNER] Query failed on selector `{}`: {:?}", selector, err); no_err = false; } }; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 610c06bea2..3c218fa85a 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -180,7 +180,7 @@ impl StorageService { // log error if the sample is not timestamped // This is to reduce down the line inconsistencies of having duplicate samples stored if sample.timestamp().is_none() { - tracing::error!("Sample {} is not timestamped. Please timestamp samples meant for replicated storage.", sample); + tracing::error!("Sample {:?} is not timestamped. Please timestamp samples meant for replicated storage.", sample); } else { self.process_sample(sample).await; @@ -263,7 +263,7 @@ impl StorageService { // The storage should only simply save the key, sample pair while put and retrieve the same during get // the trimming during PUT and GET should be handled by the plugin async fn process_sample(&self, sample: Sample) { - tracing::trace!("[STORAGE] Processing sample: {}", sample); + tracing::trace!("[STORAGE] Processing sample: {:?}", sample); // if wildcard, update wildcard_updates if sample.key_expr().is_wild() { self.register_wildcard_update(sample.clone()).await; @@ -289,7 +289,7 @@ impl StorageService { && self.is_latest(&k, sample.timestamp().unwrap()).await)) { tracing::trace!( - "Sample `{}` identified as neded processing for key {}", + "Sample `{:?}` identified as neded processing for key {}", sample, k ); @@ -656,7 +656,7 @@ impl StorageService { self.process_sample(sample).await; } Err(e) => tracing::warn!( - "Storage '{}' received an error to align query: {}", + "Storage '{}' received an error to align query: {:?}", self.name, e ), diff --git a/zenoh/src/admin.rs b/zenoh/src/admin.rs index 3c76ca468a..5cf4b68b05 100644 --- a/zenoh/src/admin.rs +++ b/zenoh/src/admin.rs @@ -72,7 +72,7 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { Ok(zbuf) => { let _ = query.reply(key_expr, zbuf).res_sync(); } - Err(e) => log::debug!("Admin query error: {}", e), + Err(e) => tracing::debug!("Admin query error: {}", e), } } } @@ -89,7 +89,7 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { Ok(zbuf) => { let _ = query.reply(key_expr, zbuf).res_sync(); } - Err(e) => log::debug!("Admin query error: {}", e), + Err(e) => tracing::debug!("Admin query error: {}", e), } } } diff --git a/zenoh/src/handlers/callback.rs b/zenoh/src/handlers/callback.rs index 21c1b0878c..4f49e7c41f 100644 --- a/zenoh/src/handlers/callback.rs +++ b/zenoh/src/handlers/callback.rs @@ -43,7 +43,7 @@ impl IntoHandler<'static, T> for (flume::Sender, flume::Re ( Dyn::new(move |t| { if let Err(e) = sender.send(t) { - log::error!("{}", e) + tracing::error!("{}", e) } }), receiver, diff --git a/zenoh/src/handlers/fifo.rs 
b/zenoh/src/handlers/fifo.rs index 0fa3ab304c..f0ae1a5257 100644 --- a/zenoh/src/handlers/fifo.rs +++ b/zenoh/src/handlers/fifo.rs @@ -52,7 +52,7 @@ impl IntoHandler<'static, T> ( Dyn::new(move |t| { if let Err(e) = sender.send(t) { - log::error!("{}", e) + tracing::error!("{}", e) } }), receiver, diff --git a/zenoh/src/handlers/ring.rs b/zenoh/src/handlers/ring.rs index 341a3efadd..23347f249e 100644 --- a/zenoh/src/handlers/ring.rs +++ b/zenoh/src/handlers/ring.rs @@ -108,7 +108,7 @@ impl IntoHandler<'static, T> for RingChannel { drop(g); let _ = sender.try_send(()); } - Err(e) => log::error!("{}", e), + Err(e) => tracing::error!("{}", e), }), receiver, ) diff --git a/zenoh/src/net/primitives/mux.rs b/zenoh/src/net/primitives/mux.rs index 365f390461..8589fab518 100644 --- a/zenoh/src/net/primitives/mux.rs +++ b/zenoh/src/net/primitives/mux.rs @@ -51,7 +51,7 @@ impl Primitives for Mux { let _ = self.handler.schedule(msg); } else if let Some(face) = self.face.get() { let Some(face) = face.upgrade() else { - log::debug!("Invalid face: {:?}. Interest not sent: {:?}", face, msg); + tracing::debug!("Invalid face: {:?}. Interest not sent: {:?}", face, msg); return; }; let ctx = RoutingContext::new_out(msg, face.clone()); @@ -65,7 +65,7 @@ impl Primitives for Mux { let _ = self.handler.schedule(ctx.msg); } } else { - log::debug!("Uninitialized multiplexer. Interest not sent: {:?}", msg); + tracing::debug!("Uninitialized multiplexer. Interest not sent: {:?}", msg); } } @@ -365,7 +365,7 @@ impl Primitives for McastMux { let _ = self.handler.schedule(ctx.msg); } } else { - log::error!("Uninitialized multiplexer!"); + tracing::error!("Uninitialized multiplexer!"); } } diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index 7cefd3f455..fe2274ed64 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -43,7 +43,7 @@ pub(crate) fn declare_subscription( .cloned() { Some(mut prefix) => { - log::debug!( + tracing::debug!( "{} Declare subscriber {} ({}{})", face, id, @@ -114,7 +114,7 @@ pub(crate) fn undeclare_subscription( Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { Some(res) => Some(res), None => { - log::error!( + tracing::error!( "{} Undeclare unknown subscriber {}{}!", face, prefix.expr(), @@ -124,7 +124,7 @@ pub(crate) fn undeclare_subscription( } }, None => { - log::error!( + tracing::error!( "{} Undeclare subscriber with unknown scope {}", face, expr.scope diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 202fed9681..cd17f1339f 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -125,7 +125,7 @@ pub(crate) fn undeclare_queryable( Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { Some(res) => Some(res), None => { - log::error!( + tracing::error!( "{} Undeclare unknown queryable {}{}!", face, prefix.expr(), @@ -135,7 +135,7 @@ pub(crate) fn undeclare_queryable( } }, None => { - log::error!( + tracing::error!( "{} Undeclare queryable with unknown scope {}", face, expr.scope diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/sample/mod.rs index 7bb3fe9cde..b8fc62be57 100644 --- a/zenoh/src/sample/mod.rs +++ b/zenoh/src/sample/mod.rs @@ -435,7 +435,7 @@ impl QoS { match Priority::try_from(self.inner.get_priority()) { Ok(p) => p, Err(e) => { - log::trace!( + tracing::trace!( "Failed to convert priority: {}; replacing with default 
value", e.to_string() ); diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 2658d39d42..875c72a395 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -2158,16 +2158,6 @@ impl Primitives for Session { fn send_response(&self, msg: Response) { trace!("recv Response {:?}", msg); match msg.payload { - ResponseBody::Ack(_) => { - tracing::warn!( - "Received a ResponseBody::Ack, but this isn't supported yet. Dropping message." - ) - } - ResponseBody::Put(_) => { - tracing::warn!( - "Received a ResponseBody::Put, but this isn't supported yet. Dropping message." - ) - } ResponseBody::Err(e) => { let mut state = zwrite!(self.state); match state.queries.get_mut(&msg.rid) { From 1c3eb8eeb0b32b111d8cd7e81eed22e903433845 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 16 Apr 2024 15:54:32 +0200 Subject: [PATCH 249/357] compile fix --- io/zenoh-transport/tests/unicast_time.rs | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/io/zenoh-transport/tests/unicast_time.rs b/io/zenoh-transport/tests/unicast_time.rs index 75d3ae1d98..0c35dc1f79 100644 --- a/io/zenoh-transport/tests/unicast_time.rs +++ b/io/zenoh-transport/tests/unicast_time.rs @@ -230,7 +230,7 @@ async fn time_lowlatency_transport(endpoint: &EndPoint) { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_tcp_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13000).parse().unwrap(); time_universal_transport(&endpoint).await; } @@ -239,7 +239,7 @@ async fn time_tcp_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_tcp_only_with_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13100).parse().unwrap(); time_lowlatency_transport(&endpoint).await; } @@ -248,7 +248,7 @@ async fn time_tcp_only_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_udp_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13010).parse().unwrap(); time_universal_transport(&endpoint).await; } @@ -257,7 +257,7 @@ async fn time_udp_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_udp_only_with_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13110).parse().unwrap(); time_lowlatency_transport(&endpoint).await; } @@ -266,7 +266,7 @@ async fn time_udp_only_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_ws_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13020).parse().unwrap(); time_universal_transport(&endpoint).await; } @@ -275,7 +275,7 @@ async fn time_ws_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_ws_only_with_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13120).parse().unwrap(); time_lowlatency_transport(&endpoint).await; } @@ -284,7 +284,7 @@ async fn time_ws_only_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn 
time_unixpipe_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = "unixpipe/time_unixpipe_only".parse().unwrap(); time_universal_transport(&endpoint).await; } @@ -293,7 +293,7 @@ async fn time_unixpipe_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_unixpipe_only_with_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = "unixpipe/time_unixpipe_only_with_lowlatency_transport" .parse() .unwrap(); @@ -304,7 +304,7 @@ async fn time_unixpipe_only_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_unix_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let f1 = "zenoh-test-unix-socket-9.sock"; let _ = std::fs::remove_file(f1); let endpoint: EndPoint = format!("unixsock-stream/{f1}").parse().unwrap(); @@ -318,8 +318,7 @@ async fn time_unix_only() { #[ignore] async fn time_tls_only() { use zenoh_link::tls::config::*; - - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // NOTE: this an auto-generated pair of certificate and key. // The target domain is localhost, so it has no real // mapping to any existing domain. The certificate and key @@ -515,7 +514,7 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_vsock_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = "vsock/VMADDR_CID_LOCAL:17000".parse().unwrap(); time_lowlatency_transport(&endpoint).await; } From 2be12212e1b5f023f0c0b1c52190983ec10ce5a4 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 16 Apr 2024 19:24:05 +0200 Subject: [PATCH 250/357] compilation fixes --- Cargo.lock | 2 ++ plugins/zenoh-plugin-rest/src/lib.rs | 2 +- zenoh-ext/src/group.rs | 2 +- zenoh-ext/src/publication_cache.rs | 1 - zenoh/src/api/builders/publication.rs | 2 +- zenoh/src/api/builders/sample.rs | 2 -- zenoh/src/api/publication.rs | 2 +- zenoh/src/api/queryable.rs | 9 +++++++-- zenoh/src/api/selector.rs | 5 ++++- zenoh/src/api/session.rs | 19 +++++++++++-------- zenoh/src/lib.rs | 3 ++- 11 files changed, 30 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bcd0301bc3..9848e366be 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5122,6 +5122,7 @@ dependencies = [ "tokio", "tracing", "zenoh", + "zenoh-util", ] [[package]] @@ -5471,6 +5472,7 @@ dependencies = [ "urlencoding", "zenoh", "zenoh-plugin-trait", + "zenoh-util", "zenoh_backend_traits", ] diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 5d3c0f5c79..ed48580920 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -37,7 +37,7 @@ use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; use zenoh::sample::{Sample, SampleKind, ValueBuilderTrait}; -use zenoh::selector::{Parameters, Selector, TIME_RANGE_KEY}; +use zenoh::selector::{Selector, TIME_RANGE_KEY}; use zenoh::session::{Session, SessionDeclarations}; use zenoh::value::Value; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 2733850036..ef6e00d63f 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -24,12 +24,12 @@ use std::ops::Add; use std::sync::Arc; use 
std::time::{Duration, Instant}; use tokio::sync::Mutex; -use tokio::task::JoinHandle; use zenoh::core::AsyncResolve; use zenoh::core::Error as ZError; use zenoh::core::Result as ZResult; use zenoh::internal::bail; use zenoh::internal::Condition; +use zenoh::internal::TaskController; use zenoh::key_expr::keyexpr; use zenoh::key_expr::KeyExpr; use zenoh::key_expr::OwnedKeyExpr; diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index afd567a8c9..b6a380d766 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -22,7 +22,6 @@ use zenoh::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; use zenoh::queryable::{Query, Queryable}; use zenoh::runtime::ZRuntime; use zenoh::sample::{Locality, Sample}; -use zenoh::selector::Parameters; use zenoh::session::{SessionDeclarations, SessionRef}; use zenoh::subscriber::FlumeSubscriber; use zenoh::{core::Result as ZResult, internal::bail}; diff --git a/zenoh/src/api/builders/publication.rs b/zenoh/src/api/builders/publication.rs index 9a95317488..c710d0ad79 100644 --- a/zenoh/src/api/builders/publication.rs +++ b/zenoh/src/api/builders/publication.rs @@ -376,7 +376,7 @@ impl<'a, 'b> SyncResolve for PublisherBuilder<'a, 'b> { is_express: self.is_express, destination: self.destination, }; - log::trace!("publish({:?})", publisher.key_expr); + tracing::trace!("publish({:?})", publisher.key_expr); Ok(publisher) } } diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs index 55a028f687..3b1bd642cd 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -18,8 +18,6 @@ use crate::api::encoding::Encoding; use crate::api::key_expr::KeyExpr; use crate::api::payload::Payload; use crate::api::publication::Priority; -#[cfg(feature = "unstable")] -use crate::api::sample::Attachment; use crate::api::sample::QoS; use crate::api::sample::QoSBuilder; use crate::api::sample::Sample; diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 1f90ce422b..ed8422a75e 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -540,7 +540,7 @@ impl Publisher<'_> { #[cfg(feature = "unstable")] source_info: SourceInfo, #[cfg(feature = "unstable")] attachment: Option, ) -> ZResult<()> { - log::trace!("write({:?}, [...])", &self.key_expr); + tracing::trace!("write({:?}, [...])", &self.key_expr); let primitives = zread!(self.session.state) .primitives .as_ref() diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index b4dd7b51ac..942022a510 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -25,7 +25,12 @@ use super::{ Id, }; use crate::net::primitives::Primitives; -use std::{fmt, future::Ready, ops::Deref, sync::Arc}; +use std::{ + fmt, + future::Ready, + ops::{Deref, DerefMut}, + sync::Arc, +}; use uhlc::Timestamp; use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; use zenoh_protocol::{ @@ -213,7 +218,7 @@ impl Query { } #[cfg(feature = "unstable")] fn _accepts_any_replies(&self) -> ZResult { - use crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM; + use crate::api::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM; Ok(self .parameters() diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index 0e6881f526..1e2218d46d 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -20,7 +20,10 @@ use std::{ ops::{Deref, DerefMut}, str::FromStr, }; -use zenoh_protocol::core::key_expr::{keyexpr, OwnedKeyExpr}; +use zenoh_protocol::core::{ + key_expr::{keyexpr, 
OwnedKeyExpr}, + Properties, +}; #[cfg(feature = "unstable")] use zenoh_result::ZResult; #[cfg(feature = "unstable")] diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 337c014597..412b4a2f6d 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -25,13 +25,15 @@ use super::{ query::{ConsolidationMode, GetBuilder, QueryConsolidation, QueryState, QueryTarget, Reply}, queryable::{Query, QueryInner, QueryableBuilder, QueryableState}, sample::{DataInfo, DataInfoIntoSample, Locality, QoS, Sample, SampleKind}, - selector::{Parameters, Selector, TIME_RANGE_KEY}, + selector::{Selector, TIME_RANGE_KEY}, subscriber::{SubscriberBuilder, SubscriberState}, value::Value, Id, }; -use crate::net::{primitives::Primitives, routing::dispatcher::face::Face, runtime::Runtime}; -use log::{error, trace, warn}; +use crate::{ + api::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM, + net::{primitives::Primitives, routing::dispatcher::face::Face, runtime::Runtime}, +}; use std::{ collections::HashMap, convert::{TryFrom, TryInto}, @@ -44,6 +46,7 @@ use std::{ }, time::Duration, }; +use tracing::{error, trace, warn}; use uhlc::HLC; use zenoh_buffers::ZBuf; use zenoh_collections::SingleOrVec; @@ -79,11 +82,11 @@ use zenoh_util::core::AsyncResolve; #[cfg(feature = "unstable")] use super::{ - liveliness::{Liveliness, LivelinessTokenState}, - publication::Publisher, - publication::{MatchingListenerState, MatchingStatus}, - sample::{Attachment, SourceInfo}, - }; + liveliness::{Liveliness, LivelinessTokenState}, + publication::Publisher, + publication::{MatchingListenerState, MatchingStatus}, + sample::{Attachment, SourceInfo}, +}; zconfigurable! { pub(crate) static ref API_DATA_RECEPTION_CHANNEL_SIZE: usize = 256; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 15896bb04f..8f7645a965 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -239,10 +239,10 @@ pub mod payload { /// [Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries pub mod selector { - pub use crate::api::selector::Parameter; pub use crate::api::selector::Parameters; pub use crate::api::selector::Selector; pub use crate::api::selector::TIME_RANGE_KEY; + pub use zenoh_protocol::core::Properties; pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; } @@ -351,6 +351,7 @@ pub mod internal { pub use zenoh_macros::unstable; pub use zenoh_result::bail; pub use zenoh_sync::Condition; + pub use zenoh_task::TaskController; pub use zenoh_task::TerminatableTask; pub use zenoh_util::core::ResolveFuture; pub use zenoh_util::LibLoader; From 334946c01b1bed73a4dfcf3377b31d4875de2448 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 16 Apr 2024 19:28:16 +0200 Subject: [PATCH 251/357] compilation fix --- zenoh/src/api/selector.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index 1e2218d46d..9891726287 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -329,7 +329,7 @@ impl<'a> From> for Selector<'a> { #[test] fn selector_accessors() { - use crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM as ANYKE; + use crate::api::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM as ANYKE; let time_range = "[now(-2s)..now(2s)]".parse().unwrap(); for selector in [ From f6aebd437731361a107af3f0c45044de75d121f5 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 16 Apr 2024 19:32:45 +0200 Subject: [PATCH 252/357] cargo fmt --- plugins/zenoh-plugin-example/src/lib.rs | 2 +- 
plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs | 6 +++++- zenoh/src/api/encoding.rs | 1 - zenoh/src/api/scouting.rs | 1 - zenoh/tests/events.rs | 2 +- zenoh/tests/routing.rs | 2 +- zenohd/src/main.rs | 2 +- 7 files changed, 9 insertions(+), 7 deletions(-) diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 2b4c3b8b95..3a14525cd7 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -21,8 +21,8 @@ use std::sync::{ atomic::{AtomicBool, Ordering::Relaxed}, Arc, Mutex, }; -use zenoh::key_expr::{keyexpr, KeyExpr}; use tracing::{debug, info}; +use zenoh::key_expr::{keyexpr, KeyExpr}; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::runtime::Runtime; use zenoh::sample::Sample; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 5322a819d3..92c743d512 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -354,7 +354,11 @@ impl Aligner { } } Err(err) => { - tracing::error!("[ALIGNER] Query failed on selector `{}`: {:?}", selector, err); + tracing::error!( + "[ALIGNER] Query failed on selector `{}`: {:?}", + selector, + err + ); no_err = false; } }; diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs index aba01e01b4..3283ec1a84 100644 --- a/zenoh/src/api/encoding.rs +++ b/zenoh/src/api/encoding.rs @@ -850,4 +850,3 @@ impl EncodingMapping for SharedMemoryBuf { } pub struct EncodingBuilder(Encoding); - diff --git a/zenoh/src/api/scouting.rs b/zenoh/src/api/scouting.rs index 4e0ad50b0b..2b0022f242 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -376,4 +376,3 @@ where handler: DefaultHandler::default(), } } - diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index b2c6ef862f..3f18b027a8 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -12,8 +12,8 @@ // ZettaScale Zenoh Team, // use std::time::Duration; -use zenoh::prelude::r#async::*; use zenoh::internal::ztimeout; +use zenoh::prelude::r#async::*; const TIMEOUT: Duration = Duration::from_secs(10); diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 9c5ee527d5..dea8870905 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -16,9 +16,9 @@ use std::sync::atomic::Ordering; use std::sync::{atomic::AtomicUsize, Arc}; use std::time::Duration; use tokio_util::{sync::CancellationToken, task::TaskTracker}; +use zenoh::core::Result; use zenoh::internal::{bail, ztimeout}; use zenoh::prelude::r#async::*; -use zenoh::core::Result; const TIMEOUT: Duration = Duration::from_secs(10); const MSG_COUNT: usize = 50; diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 4e56fbfbfb..6867ac6a0b 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -15,10 +15,10 @@ use clap::Parser; use futures::future; use git_version::git_version; use std::collections::HashSet; -use zenoh::config::EndPoint; use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::EnvFilter; +use zenoh::config::EndPoint; use zenoh::config::{Config, ModeDependentValue, PermissionsConf, PluginLoad, ValidatedMap}; use zenoh::core::Result; use zenoh::plugins::PluginsManager; From cc7b3529ab94696f88e15fe0d87d2ddb6c0ea2de Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 17 Apr 2024 10:06:58 +0200 Subject: [PATCH 253/357] Merge main --- 
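// --- Illustrative aside (not part of the patch series) -----------------------
// The zenoh/tests/acl.rs changes brought in by this merge adopt the reworked
// query/reply API: a queryable answers with `query.reply(key_expr, value)`,
// callers inspect replies through `reply.result()`, and payloads are decoded
// with `payload().deserialize()`. A minimal sketch of the consumer side of
// that pattern, assuming an already-open `zenoh::Session` and a hypothetical
// key expression "test/demo"; only calls that appear in the diffs below are
// used:
async fn reply_round_trip_sketch(session: &zenoh::Session) {
    use zenoh::prelude::r#async::*;

    // Issue a get and drain the replies from the default channel handler.
    let replies = session.get("test/demo").res_async().await.unwrap();
    while let Ok(reply) = replies.recv_async().await {
        match reply.result() {
            Ok(sample) => {
                // Replies now expose a payload; decode it as a String.
                let text: String = sample.payload().deserialize().unwrap();
                println!("received: {text}");
            }
            Err(e) => println!("query error: {e:?}"),
        }
    }
}
// --- end of aside -------------------------------------------------------------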
zenoh-ext/src/group.rs | 1 - zenoh/tests/acl.rs | 44 +++++++++++++++++++----------------------- 2 files changed, 20 insertions(+), 25 deletions(-) diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index a60635becf..0926c63d33 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -24,7 +24,6 @@ use std::ops::Add; use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::sync::Mutex; -use tokio::task::JoinHandle; use zenoh::payload::PayloadReader; use zenoh::prelude::r#async::*; use zenoh::publication::Publisher; diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index fec29515db..c099fa021e 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -97,7 +97,7 @@ mod test { .declare_subscriber(KEY_EXPR) .callback(move |sample| { let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.value.to_string(); + *temp_value = sample.payload().deserialize::().unwrap(); }) .res_async() .await @@ -142,7 +142,7 @@ mod test { .declare_subscriber(KEY_EXPR) .callback(move |sample| { let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.value.to_string(); + *temp_value = sample.payload().deserialize::().unwrap(); }) .res_async()) .unwrap(); @@ -203,7 +203,7 @@ mod test { .declare_subscriber(KEY_EXPR) .callback(move |sample| { let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.value.to_string(); + *temp_value = sample.payload().deserialize::().unwrap(); }) .res_async()) .unwrap(); @@ -263,7 +263,7 @@ mod test { .declare_subscriber(KEY_EXPR) .callback(move |sample| { let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.value.to_string(); + *temp_value = sample.payload().deserialize::().unwrap(); }) .res_async()) .unwrap(); @@ -307,10 +307,9 @@ mod test { let qbl = ztimeout!(qbl_session .declare_queryable(KEY_EXPR) .callback(move |sample| { - let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); tokio::task::block_in_place(move || { Handle::current().block_on(async move { - ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + ztimeout!(sample.reply(KEY_EXPR, VALUE).res_async()).unwrap() }); }); }) @@ -320,12 +319,12 @@ mod test { tokio::time::sleep(SLEEP).await; let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { - match reply.sample { + match reply.result() { Ok(sample) => { - received_value = sample.value.to_string(); + received_value = sample.payload().deserialize::().unwrap(); break; } - Err(e) => println!("Error : {}", e), + Err(e) => println!("Error : {:?}", e), } } tokio::time::sleep(SLEEP).await; @@ -363,10 +362,9 @@ mod test { let qbl = ztimeout!(qbl_session .declare_queryable(KEY_EXPR) .callback(move |sample| { - let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); tokio::task::block_in_place(move || { Handle::current().block_on(async move { - ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + ztimeout!(sample.reply(KEY_EXPR, VALUE).res_async()).unwrap() }); }); }) @@ -376,12 +374,12 @@ mod test { tokio::time::sleep(SLEEP).await; let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { - match reply.sample { + match reply.result() { Ok(sample) => { - received_value = sample.value.to_string(); + received_value = sample.payload().deserialize::().unwrap(); break; } - Err(e) => println!("Error : {}", e), + Err(e) => println!("Error : {:?}", e), } } tokio::time::sleep(SLEEP).await; @@ -434,10 +432,9 @@ mod test { let qbl = 
ztimeout!(qbl_session .declare_queryable(KEY_EXPR) .callback(move |sample| { - let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); tokio::task::block_in_place(move || { Handle::current().block_on(async move { - ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + ztimeout!(sample.reply(KEY_EXPR, VALUE).res_async()).unwrap() }); }); }) @@ -447,12 +444,12 @@ mod test { tokio::time::sleep(SLEEP).await; let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { - match reply.sample { + match reply.result() { Ok(sample) => { - received_value = sample.value.to_string(); + received_value = sample.payload().deserialize::().unwrap(); break; } - Err(e) => println!("Error : {}", e), + Err(e) => println!("Error : {:?}", e), } } tokio::time::sleep(SLEEP).await; @@ -504,10 +501,9 @@ mod test { let qbl = ztimeout!(qbl_session .declare_queryable(KEY_EXPR) .callback(move |sample| { - let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); tokio::task::block_in_place(move || { Handle::current().block_on(async move { - ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + ztimeout!(sample.reply(KEY_EXPR, VALUE).res_async()).unwrap() }); }); }) @@ -517,12 +513,12 @@ mod test { tokio::time::sleep(SLEEP).await; let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { - match reply.sample { + match reply.result() { Ok(sample) => { - received_value = sample.value.to_string(); + received_value = sample.payload().deserialize::().unwrap(); break; } - Err(e) => println!("Error : {}", e), + Err(e) => println!("Error : {:?}", e), } } tokio::time::sleep(SLEEP).await; From 8a09bf90f42e543d04e2c8a6f03f5db2169baf41 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 17 Apr 2024 10:11:46 +0200 Subject: [PATCH 254/357] cargo fmt --all --- plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 68387b6596..c1dc1196bf 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -353,7 +353,11 @@ impl Aligner { } } Err(err) => { - tracing::error!("[ALIGNER] Query failed on selector `{}`: {:?}", selector, err); + tracing::error!( + "[ALIGNER] Query failed on selector `{}`: {:?}", + selector, + err + ); no_err = false; } }; From a675130d99d5da5dcbd54b915f45e5d35c581322 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 17 Apr 2024 10:14:10 +0200 Subject: [PATCH 255/357] Use tracing in examples --- examples/examples/z_sub_shm.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index 630876f287..f89df5ee60 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -20,7 +20,7 @@ use zenoh_shm::SharedMemoryBuf; #[tokio::main] async fn main() { // Initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let (mut config, key_expr) = parse_args(); From 72bcead175f3c874736bcaaa11aa686cb313dc66 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 17 Apr 2024 10:21:23 +0200 Subject: [PATCH 256/357] Rename Payload to ZBytes --- examples/examples/z_ping.rs | 2 +- examples/examples/z_pub_thr.rs | 2 +- plugins/zenoh-plugin-rest/src/lib.rs | 4 +- 
.../src/replica/align_queryable.rs | 2 +- .../src/replica/aligner.rs | 2 +- .../src/replica/mod.rs | 2 +- .../tests/operations.rs | 2 +- .../tests/wildcard.rs | 2 +- zenoh-ext/src/group.rs | 2 +- zenoh/src/admin.rs | 6 +- zenoh/src/{payload.rs => bytes.rs} | 498 +++++++++--------- zenoh/src/encoding.rs | 4 +- zenoh/src/lib.rs | 2 +- zenoh/src/net/runtime/adminspace.rs | 12 +- zenoh/src/prelude.rs | 4 +- zenoh/src/publication.rs | 18 +- zenoh/src/query.rs | 8 +- zenoh/src/queryable.rs | 16 +- zenoh/src/sample/builder.rs | 18 +- zenoh/src/sample/mod.rs | 18 +- zenoh/src/session.rs | 4 +- zenoh/src/value.rs | 12 +- 22 files changed, 320 insertions(+), 320 deletions(-) rename zenoh/src/{payload.rs => bytes.rs} (73%) diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index 1af30bfb10..f07c1eac66 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -39,7 +39,7 @@ fn main() { .res() .unwrap(); - let data: Payload = (0usize..size) + let data: ZBytes = (0usize..size) .map(|i| (i % 10) as u8) .collect::>() .into(); diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 41c5179b63..945a871094 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -30,7 +30,7 @@ fn main() { let payload_size = args.payload_size; - let data: Payload = (0..payload_size) + let data: ZBytes = (0..payload_size) .map(|i| (i % 10) as u8) .collect::>() .into(); diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index c9397aa4a9..82f02356d4 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -29,7 +29,7 @@ use std::sync::Arc; use tide::http::Mime; use tide::sse::Sender; use tide::{Request, Response, Server, StatusCode}; -use zenoh::payload::StringOrBase64; +use zenoh::bytes::StringOrBase64; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, Reply}; @@ -61,7 +61,7 @@ pub fn base64_encode(data: &[u8]) -> String { general_purpose::STANDARD.encode(data) } -fn payload_to_json(payload: &Payload, encoding: &Encoding) -> serde_json::Value { +fn payload_to_json(payload: &ZBytes, encoding: &Encoding) -> serde_json::Value { match payload.is_empty() { // If the value is empty return a JSON null true => serde_json::Value::Null, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index f3f8ade729..8ac9d18f88 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -18,7 +18,7 @@ use std::cmp::Ordering; use std::collections::{BTreeSet, HashMap, HashSet}; use std::str; use std::str::FromStr; -use zenoh::payload::StringOrBase64; +use zenoh::bytes::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::time::Timestamp; use zenoh::Session; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index c1dc1196bf..3392bf28e8 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -18,8 +18,8 @@ use async_std::sync::{Arc, RwLock}; use flume::{Receiver, Sender}; use std::collections::{HashMap, HashSet}; use std::str; +use zenoh::bytes::StringOrBase64; use zenoh::key_expr::{KeyExpr, OwnedKeyExpr}; -use zenoh::payload::StringOrBase64; 
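// --- Illustrative aside (not part of the patch): every hunk in this commit
// follows the same mechanical pattern, renaming `Payload` to `ZBytes` and
// moving the module from `zenoh::payload` to `zenoh::bytes`, as in the import
// swap just above. A minimal sketch of a call site after the rename, mirroring
// the z_ping/z_pub_thr example changes elsewhere in this patch (the
// `zenoh::bytes::ZBytes` path is assumed from those hunks):
fn build_zbytes_sketch() -> zenoh::bytes::ZBytes {
    // Formerly `let data: Payload = ...`; the Vec<u8> conversion is unchanged.
    let data: zenoh::bytes::ZBytes = (0usize..8)
        .map(|i| (i % 10) as u8)
        .collect::<Vec<u8>>()
        .into();
    data
}
// --- end of aside -------------------------------------------------------------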
use zenoh::prelude::r#async::*; use zenoh::sample::builder::SampleBuilder; use zenoh::time::Timestamp; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index 525d446f3a..467751b04d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -26,7 +26,7 @@ use std::str; use std::str::FromStr; use std::time::{Duration, SystemTime}; use urlencoding::encode; -use zenoh::payload::StringOrBase64; +use zenoh::bytes::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::time::Timestamp; use zenoh::Session; diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index c82459cdcc..a32648319e 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -20,7 +20,7 @@ use std::str::FromStr; use std::thread::sleep; use async_std::task; -use zenoh::payload::StringOrBase64; +use zenoh::bytes::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::query::Reply; use zenoh::{prelude::Config, time::Timestamp}; diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index d778eadde4..6698f2a5b9 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -21,7 +21,7 @@ use std::thread::sleep; // use std::collections::HashMap; use async_std::task; -use zenoh::payload::StringOrBase64; +use zenoh::bytes::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::query::Reply; use zenoh::{prelude::Config, time::Timestamp}; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 0926c63d33..83b3c7b199 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -24,7 +24,7 @@ use std::ops::Add; use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::sync::Mutex; -use zenoh::payload::PayloadReader; +use zenoh::bytes::PayloadReader; use zenoh::prelude::r#async::*; use zenoh::publication::Publisher; use zenoh::query::ConsolidationMode; diff --git a/zenoh/src/admin.rs b/zenoh/src/admin.rs index 5cf4b68b05..16de7dd0a5 100644 --- a/zenoh/src/admin.rs +++ b/zenoh/src/admin.rs @@ -17,7 +17,7 @@ use crate::{ prelude::sync::{KeyExpr, Locality, SampleKind}, queryable::Query, sample::DataInfo, - Payload, Session, ZResult, + Session, ZBytes, ZResult, }; use std::{ collections::hash_map::DefaultHasher, @@ -68,7 +68,7 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { let key_expr = *KE_PREFIX / own_zid / *KE_TRANSPORT_UNICAST / zid; if query.key_expr().intersects(&key_expr) { if let Ok(value) = serde_json::value::to_value(peer.clone()) { - match Payload::try_from(value) { + match ZBytes::try_from(value) { Ok(zbuf) => { let _ = query.reply(key_expr, zbuf).res_sync(); } @@ -85,7 +85,7 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { *KE_PREFIX / own_zid / *KE_TRANSPORT_UNICAST / zid / *KE_LINK / lid; if query.key_expr().intersects(&key_expr) { if let Ok(value) = serde_json::value::to_value(link) { - match Payload::try_from(value) { + match ZBytes::try_from(value) { Ok(zbuf) => { let _ = query.reply(key_expr, zbuf).res_sync(); } diff --git a/zenoh/src/payload.rs b/zenoh/src/bytes.rs similarity index 73% rename from zenoh/src/payload.rs rename to zenoh/src/bytes.rs index 5280c7af3c..91bae8f517 100644 --- a/zenoh/src/payload.rs 
+++ b/zenoh/src/bytes.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // -//! Payload primitives. +//! ZBytes primitives. use crate::buffers::ZBuf; use std::str::Utf8Error; use std::{ @@ -45,21 +45,21 @@ pub trait Deserialize<'a, T> { type Error; /// The implementer should take care of deserializing the type `T` based on the [`Encoding`] information. - fn deserialize(self, t: &'a Payload) -> Result; + fn deserialize(self, t: &'a ZBytes) -> Result; } /// A payload contains the serialized bytes of user data. #[repr(transparent)] #[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct Payload(ZBuf); +pub struct ZBytes(ZBuf); -impl Payload { +impl ZBytes { /// Create an empty payload. pub const fn empty() -> Self { Self(ZBuf::empty()) } - /// Create a [`Payload`] from any type `T` that implements [`Into`]. + /// Create a [`ZBytes`] from any type `T` that implements [`Into`]. pub fn new(t: T) -> Self where T: Into, @@ -77,34 +77,34 @@ impl Payload { self.0.len() } - /// Get a [`PayloadReader`] implementing [`std::io::Read`] trait. - pub fn reader(&self) -> PayloadReader<'_> { - PayloadReader(self.0.reader()) + /// Get a [`ZBytesReader`] implementing [`std::io::Read`] trait. + pub fn reader(&self) -> ZBytesReader<'_> { + ZBytesReader(self.0.reader()) } - /// Build a [`Payload`] from a generic reader implementing [`std::io::Read`]. This operation copies data from the reader. + /// Build a [`ZBytes`] from a generic reader implementing [`std::io::Read`]. This operation copies data from the reader. pub fn from_reader(mut reader: R) -> Result where R: std::io::Read, { let mut buf: Vec = vec![]; reader.read_to_end(&mut buf)?; - Ok(Payload::new(buf)) + Ok(ZBytes::new(buf)) } - /// Get a [`PayloadWriter`] implementing [`std::io::Write`] trait. - pub fn writer(&mut self) -> PayloadWriter<'_> { - PayloadWriter(self.0.writer()) + /// Get a [`ZBytesWriter`] implementing [`std::io::Write`] trait. + pub fn writer(&mut self) -> ZBytesWriter<'_> { + ZBytesWriter(self.0.writer()) } - /// Get a [`PayloadReader`] implementing [`std::io::Read`] trait. - pub fn iter(&self) -> PayloadIterator<'_, T> + /// Get a [`ZBytesReader`] implementing [`std::io::Read`] trait. + pub fn iter(&self) -> ZBytesIterator<'_, T> where - T: for<'b> TryFrom<&'b Payload>, + T: for<'b> TryFrom<&'b ZBytes>, for<'b> ZSerde: Deserialize<'b, T>, for<'b> >::Error: Debug, { - PayloadIterator { + ZBytesIterator { reader: self.0.reader(), _t: PhantomData::, } @@ -113,16 +113,16 @@ impl Payload { /// Serialize an object of type `T` as a [`Value`] using the [`ZSerde`]. /// /// ```rust - /// use zenoh::payload::Payload; + /// use zenoh::payload::ZBytes; /// /// let start = String::from("abc"); - /// let payload = Payload::serialize(start.clone()); + /// let payload = ZBytes::serialize(start.clone()); /// let end: String = payload.deserialize().unwrap(); /// assert_eq!(start, end); /// ``` pub fn serialize(t: T) -> Self where - ZSerde: Serialize, + ZSerde: Serialize, { ZSerde.serialize(t) } @@ -148,29 +148,29 @@ impl Payload { } } -/// A reader that implements [`std::io::Read`] trait to read from a [`Payload`]. +/// A reader that implements [`std::io::Read`] trait to read from a [`ZBytes`]. 
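// --- Illustrative aside (not part of the patch): `ZBytes::serialize` and
// `ZBytes::deserialize` delegate to the `ZSerde` codec, so any type with the
// corresponding impls round-trips through a payload. A small sketch using only
// conversions defined in this file (the String and integer impls appear further
// down in the diff; the `zenoh::bytes::ZBytes` path is assumed from this
// commit's rename):
fn zserde_round_trip_sketch() {
    use zenoh::bytes::ZBytes;

    // String round trip, as in the doc example above.
    let start = String::from("abc");
    let bytes = ZBytes::serialize(start.clone());
    let end: String = bytes.deserialize().unwrap();
    assert_eq!(start, end);

    // Integers are written little-endian with trailing zero bytes trimmed.
    let n = ZBytes::serialize(42u64);
    assert_eq!(n.deserialize::<u64>().unwrap(), 42u64);
}
// --- end of aside -------------------------------------------------------------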
#[repr(transparent)] #[derive(Debug)] -pub struct PayloadReader<'a>(ZBufReader<'a>); +pub struct ZBytesReader<'a>(ZBufReader<'a>); -impl std::io::Read for PayloadReader<'_> { +impl std::io::Read for ZBytesReader<'_> { fn read(&mut self, buf: &mut [u8]) -> std::io::Result { std::io::Read::read(&mut self.0, buf) } } -impl std::io::Seek for PayloadReader<'_> { +impl std::io::Seek for ZBytesReader<'_> { fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result { std::io::Seek::seek(&mut self.0, pos) } } -/// A writer that implements [`std::io::Write`] trait to write into a [`Payload`]. +/// A writer that implements [`std::io::Write`] trait to write into a [`ZBytes`]. #[repr(transparent)] #[derive(Debug)] -pub struct PayloadWriter<'a>(ZBufWriter<'a>); +pub struct ZBytesWriter<'a>(ZBufWriter<'a>); -impl std::io::Write for PayloadWriter<'_> { +impl std::io::Write for ZBytesWriter<'_> { fn write(&mut self, buf: &[u8]) -> std::io::Result { std::io::Write::write(&mut self.0, buf) } @@ -180,11 +180,11 @@ impl std::io::Write for PayloadWriter<'_> { } } -/// An iterator that implements [`std::iter::Iterator`] trait to iterate on values `T` in a [`Payload`]. -/// Note that [`Payload`] contains a serialized version of `T` and iterating over a [`Payload`] performs lazy deserialization. +/// An iterator that implements [`std::iter::Iterator`] trait to iterate on values `T` in a [`ZBytes`]. +/// Note that [`ZBytes`] contains a serialized version of `T` and iterating over a [`ZBytes`] performs lazy deserialization. #[repr(transparent)] #[derive(Debug)] -pub struct PayloadIterator<'a, T> +pub struct ZBytesIterator<'a, T> where ZSerde: Deserialize<'a, T>, { @@ -192,7 +192,7 @@ where _t: PhantomData, } -impl Iterator for PayloadIterator<'_, T> +impl Iterator for ZBytesIterator<'_, T> where for<'a> ZSerde: Deserialize<'a, T>, for<'a> >::Error: Debug, @@ -203,16 +203,16 @@ where let codec = Zenoh080::new(); let kbuf: ZBuf = codec.read(&mut self.reader).ok()?; - let kpld = Payload::new(kbuf); + let kpld = ZBytes::new(kbuf); let t = ZSerde.deserialize(&kpld).ok()?; Some(t) } } -impl FromIterator for Payload +impl FromIterator for ZBytes where - ZSerde: Serialize, + ZSerde: Serialize, { fn from_iter>(iter: T) -> Self { let codec = Zenoh080::new(); @@ -228,27 +228,27 @@ where } } - Payload::new(buffer) + ZBytes::new(buffer) } } -/// Wrapper type for API ergonomicity to allow any type `T` to be converted into `Option` where `T` implements `Into`. +/// Wrapper type for API ergonomicity to allow any type `T` to be converted into `Option` where `T` implements `Into`. 
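// --- Illustrative aside (not part of the patch): `ZBytesReader`/`ZBytesWriter`
// are thin wrappers over `ZBufReader`/`ZBufWriter` exposing `std::io::Read` and
// `std::io::Write`, while `FromIterator`/`ZBytesIterator` let several values be
// framed into, and lazily decoded from, a single payload. A hedged sketch based
// only on the items defined above (module path assumed from this commit's
// rename):
fn zbytes_io_sketch() -> std::io::Result<()> {
    use std::io::{Read, Write};
    use zenoh::bytes::ZBytes;

    // Write raw bytes through the std::io::Write adapter...
    let mut payload = ZBytes::empty();
    payload.writer().write_all(b"hello")?;

    // ...and read them back through the std::io::Read adapter.
    let mut buf = Vec::new();
    payload.reader().read_to_end(&mut buf)?;
    assert_eq!(buf, b"hello".to_vec());

    // Frame several values into one payload, then decode them lazily.
    let many: ZBytes = (0u32..4).collect();
    let back: Vec<u32> = many.iter::<u32>().collect();
    assert_eq!(back, vec![0, 1, 2, 3]);
    Ok(())
}
// --- end of aside -------------------------------------------------------------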
#[repr(transparent)] #[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct OptionPayload(Option); +pub struct OptionZBytes(Option); -impl From for OptionPayload +impl From for OptionZBytes where - T: Into, + T: Into, { fn from(value: T) -> Self { Self(Some(value.into())) } } -impl From> for OptionPayload +impl From> for OptionZBytes where - T: Into, + T: Into, { fn from(mut value: Option) -> Self { match value.take() { @@ -258,9 +258,9 @@ where } } -impl From<&Option> for OptionPayload +impl From<&Option> for OptionZBytes where - for<'a> &'a T: Into, + for<'a> &'a T: Into, { fn from(value: &Option) -> Self { match value.as_ref() { @@ -270,8 +270,8 @@ where } } -impl From for Option { - fn from(value: OptionPayload) -> Self { +impl From for Option { + fn from(value: OptionZBytes) -> Self { value.0 } } @@ -286,28 +286,28 @@ pub struct ZDeserializeError; // ZBuf impl Serialize for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: ZBuf) -> Self::Output { - Payload::new(t) + ZBytes::new(t) } } -impl From for Payload { +impl From for ZBytes { fn from(t: ZBuf) -> Self { ZSerde.serialize(t) } } impl Serialize<&ZBuf> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: &ZBuf) -> Self::Output { - Payload::new(t.clone()) + ZBytes::new(t.clone()) } } -impl From<&ZBuf> for Payload { +impl From<&ZBuf> for ZBytes { fn from(t: &ZBuf) -> Self { ZSerde.serialize(t) } @@ -316,47 +316,47 @@ impl From<&ZBuf> for Payload { impl Deserialize<'_, ZBuf> for ZSerde { type Error = Infallible; - fn deserialize(self, v: &Payload) -> Result { + fn deserialize(self, v: &ZBytes) -> Result { Ok(v.0.clone()) } } -impl From for ZBuf { - fn from(value: Payload) -> Self { +impl From for ZBuf { + fn from(value: ZBytes) -> Self { value.0 } } -impl From<&Payload> for ZBuf { - fn from(value: &Payload) -> Self { +impl From<&ZBytes> for ZBuf { + fn from(value: &ZBytes) -> Self { ZSerde.deserialize(value).unwrap_infallible() } } // ZSlice impl Serialize for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: ZSlice) -> Self::Output { - Payload::new(t) + ZBytes::new(t) } } -impl From for Payload { +impl From for ZBytes { fn from(t: ZSlice) -> Self { ZSerde.serialize(t) } } impl Serialize<&ZSlice> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: &ZSlice) -> Self::Output { - Payload::new(t.clone()) + ZBytes::new(t.clone()) } } -impl From<&ZSlice> for Payload { +impl From<&ZSlice> for ZBytes { fn from(t: &ZSlice) -> Self { ZSerde.serialize(t) } @@ -365,47 +365,47 @@ impl From<&ZSlice> for Payload { impl Deserialize<'_, ZSlice> for ZSerde { type Error = Infallible; - fn deserialize(self, v: &Payload) -> Result { + fn deserialize(self, v: &ZBytes) -> Result { Ok(v.0.to_zslice()) } } -impl From for ZSlice { - fn from(value: Payload) -> Self { +impl From for ZSlice { + fn from(value: ZBytes) -> Self { ZBuf::from(value).to_zslice() } } -impl From<&Payload> for ZSlice { - fn from(value: &Payload) -> Self { +impl From<&ZBytes> for ZSlice { + fn from(value: &ZBytes) -> Self { ZSerde.deserialize(value).unwrap_infallible() } } // [u8; N] impl Serialize<[u8; N]> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: [u8; N]) -> Self::Output { - Payload::new(t) + ZBytes::new(t) } } -impl From<[u8; N]> for Payload { +impl From<[u8; N]> for ZBytes { fn from(t: [u8; N]) -> Self { ZSerde.serialize(t) } } impl Serialize<&[u8; N]> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn 
serialize(self, t: &[u8; N]) -> Self::Output { - Payload::new(*t) + ZBytes::new(*t) } } -impl From<&[u8; N]> for Payload { +impl From<&[u8; N]> for ZBytes { fn from(t: &[u8; N]) -> Self { ZSerde.serialize(t) } @@ -414,7 +414,7 @@ impl From<&[u8; N]> for Payload { impl Deserialize<'_, [u8; N]> for ZSerde { type Error = ZDeserializeError; - fn deserialize(self, v: &Payload) -> Result<[u8; N], Self::Error> { + fn deserialize(self, v: &ZBytes) -> Result<[u8; N], Self::Error> { use std::io::Read; if v.0.len() != N { @@ -427,46 +427,46 @@ impl Deserialize<'_, [u8; N]> for ZSerde { } } -impl TryFrom for [u8; N] { +impl TryFrom for [u8; N] { type Error = ZDeserializeError; - fn try_from(value: Payload) -> Result { + fn try_from(value: ZBytes) -> Result { ZSerde.deserialize(&value) } } -impl TryFrom<&Payload> for [u8; N] { +impl TryFrom<&ZBytes> for [u8; N] { type Error = ZDeserializeError; - fn try_from(value: &Payload) -> Result { + fn try_from(value: &ZBytes) -> Result { ZSerde.deserialize(value) } } // Vec impl Serialize> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: Vec) -> Self::Output { - Payload::new(t) + ZBytes::new(t) } } -impl From> for Payload { +impl From> for ZBytes { fn from(t: Vec) -> Self { ZSerde.serialize(t) } } impl Serialize<&Vec> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: &Vec) -> Self::Output { - Payload::new(t.clone()) + ZBytes::new(t.clone()) } } -impl From<&Vec> for Payload { +impl From<&Vec> for ZBytes { fn from(t: &Vec) -> Self { ZSerde.serialize(t) } @@ -475,33 +475,33 @@ impl From<&Vec> for Payload { impl Deserialize<'_, Vec> for ZSerde { type Error = Infallible; - fn deserialize(self, v: &Payload) -> Result, Self::Error> { + fn deserialize(self, v: &ZBytes) -> Result, Self::Error> { Ok(v.0.contiguous().to_vec()) } } -impl From for Vec { - fn from(value: Payload) -> Self { +impl From for Vec { + fn from(value: ZBytes) -> Self { ZSerde.deserialize(&value).unwrap_infallible() } } -impl From<&Payload> for Vec { - fn from(value: &Payload) -> Self { +impl From<&ZBytes> for Vec { + fn from(value: &ZBytes) -> Self { ZSerde.deserialize(value).unwrap_infallible() } } // &[u8] impl Serialize<&[u8]> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: &[u8]) -> Self::Output { - Payload::new(t.to_vec()) + ZBytes::new(t.to_vec()) } } -impl From<&[u8]> for Payload { +impl From<&[u8]> for ZBytes { fn from(t: &[u8]) -> Self { ZSerde.serialize(t) } @@ -509,28 +509,28 @@ impl From<&[u8]> for Payload { // Cow<[u8]> impl<'a> Serialize> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: Cow<'a, [u8]>) -> Self::Output { - Payload::new(t.to_vec()) + ZBytes::new(t.to_vec()) } } -impl From> for Payload { +impl From> for ZBytes { fn from(t: Cow<'_, [u8]>) -> Self { ZSerde.serialize(t) } } impl<'a> Serialize<&Cow<'a, [u8]>> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: &Cow<'a, [u8]>) -> Self::Output { - Payload::new(t.to_vec()) + ZBytes::new(t.to_vec()) } } -impl From<&Cow<'_, [u8]>> for Payload { +impl From<&Cow<'_, [u8]>> for ZBytes { fn from(t: &Cow<'_, [u8]>) -> Self { ZSerde.serialize(t) } @@ -539,13 +539,13 @@ impl From<&Cow<'_, [u8]>> for Payload { impl<'a> Deserialize<'a, Cow<'a, [u8]>> for ZSerde { type Error = Infallible; - fn deserialize(self, v: &'a Payload) -> Result, Self::Error> { + fn deserialize(self, v: &'a ZBytes) -> Result, Self::Error> { Ok(v.0.contiguous()) } } -impl From for Cow<'static, [u8]> 
{ - fn from(v: Payload) -> Self { +impl From for Cow<'static, [u8]> { + fn from(v: ZBytes) -> Self { match v.0.contiguous() { Cow::Borrowed(s) => Cow::Owned(s.to_vec()), Cow::Owned(s) => Cow::Owned(s), @@ -553,36 +553,36 @@ impl From for Cow<'static, [u8]> { } } -impl<'a> From<&'a Payload> for Cow<'a, [u8]> { - fn from(value: &'a Payload) -> Self { +impl<'a> From<&'a ZBytes> for Cow<'a, [u8]> { + fn from(value: &'a ZBytes) -> Self { ZSerde.deserialize(value).unwrap_infallible() } } // String impl Serialize for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, s: String) -> Self::Output { - Payload::new(s.into_bytes()) + ZBytes::new(s.into_bytes()) } } -impl From for Payload { +impl From for ZBytes { fn from(t: String) -> Self { ZSerde.serialize(t) } } impl Serialize<&String> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, s: &String) -> Self::Output { - Payload::new(s.clone().into_bytes()) + ZBytes::new(s.clone().into_bytes()) } } -impl From<&String> for Payload { +impl From<&String> for ZBytes { fn from(t: &String) -> Self { ZSerde.serialize(t) } @@ -591,66 +591,66 @@ impl From<&String> for Payload { impl Deserialize<'_, String> for ZSerde { type Error = FromUtf8Error; - fn deserialize(self, v: &Payload) -> Result { + fn deserialize(self, v: &ZBytes) -> Result { let v: Vec = ZSerde.deserialize(v).unwrap_infallible(); String::from_utf8(v) } } -impl TryFrom for String { +impl TryFrom for String { type Error = FromUtf8Error; - fn try_from(value: Payload) -> Result { + fn try_from(value: ZBytes) -> Result { ZSerde.deserialize(&value) } } -impl TryFrom<&Payload> for String { +impl TryFrom<&ZBytes> for String { type Error = FromUtf8Error; - fn try_from(value: &Payload) -> Result { + fn try_from(value: &ZBytes) -> Result { ZSerde.deserialize(value) } } // &str impl Serialize<&str> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, s: &str) -> Self::Output { Self.serialize(s.to_string()) } } -impl From<&str> for Payload { +impl From<&str> for ZBytes { fn from(t: &str) -> Self { ZSerde.serialize(t) } } impl<'a> Serialize> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, s: Cow<'a, str>) -> Self::Output { Self.serialize(s.to_string()) } } -impl From> for Payload { +impl From> for ZBytes { fn from(t: Cow<'_, str>) -> Self { ZSerde.serialize(t) } } impl<'a> Serialize<&Cow<'a, str>> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, s: &Cow<'a, str>) -> Self::Output { Self.serialize(s.to_string()) } } -impl From<&Cow<'_, str>> for Payload { +impl From<&Cow<'_, str>> for ZBytes { fn from(t: &Cow<'_, str>) -> Self { ZSerde.serialize(t) } @@ -659,15 +659,15 @@ impl From<&Cow<'_, str>> for Payload { impl<'a> Deserialize<'a, Cow<'a, str>> for ZSerde { type Error = Utf8Error; - fn deserialize(self, v: &'a Payload) -> Result, Self::Error> { + fn deserialize(self, v: &'a ZBytes) -> Result, Self::Error> { Cow::try_from(v) } } -impl TryFrom for Cow<'static, str> { +impl TryFrom for Cow<'static, str> { type Error = Utf8Error; - fn try_from(v: Payload) -> Result { + fn try_from(v: ZBytes) -> Result { let v: Cow<'static, [u8]> = Cow::from(v); let _ = core::str::from_utf8(v.as_ref())?; // SAFETY: &str is &[u8] with the guarantee that every char is UTF-8 @@ -676,10 +676,10 @@ impl TryFrom for Cow<'static, str> { } } -impl<'a> TryFrom<&'a Payload> for Cow<'a, str> { +impl<'a> TryFrom<&'a ZBytes> for Cow<'a, str> { type Error = Utf8Error; - fn try_from(v: &'a Payload) 
-> Result { + fn try_from(v: &'a ZBytes) -> Result { let v: Cow<'a, [u8]> = Cow::from(v); let _ = core::str::from_utf8(v.as_ref())?; // SAFETY: &str is &[u8] with the guarantee that every char is UTF-8 @@ -692,7 +692,7 @@ impl<'a> TryFrom<&'a Payload> for Cow<'a, str> { macro_rules! impl_int { ($t:ty) => { impl Serialize<$t> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: $t) -> Self::Output { let bs = t.to_le_bytes(); @@ -704,25 +704,25 @@ macro_rules! impl_int { // SAFETY: // - 0 is a valid start index because bs is guaranteed to always have a length greater or equal than 1 // - end is a valid end index because is bounded between 0 and bs.len() - Payload::new(unsafe { ZSlice::new_unchecked(Arc::new(bs), 0, end) }) + ZBytes::new(unsafe { ZSlice::new_unchecked(Arc::new(bs), 0, end) }) } } - impl From<$t> for Payload { + impl From<$t> for ZBytes { fn from(t: $t) -> Self { ZSerde.serialize(t) } } impl Serialize<&$t> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: &$t) -> Self::Output { Self.serialize(*t) } } - impl From<&$t> for Payload { + impl From<&$t> for ZBytes { fn from(t: &$t) -> Self { ZSerde.serialize(t) } @@ -731,7 +731,7 @@ macro_rules! impl_int { impl<'a> Deserialize<'a, $t> for ZSerde { type Error = ZDeserializeError; - fn deserialize(self, v: &Payload) -> Result<$t, Self::Error> { + fn deserialize(self, v: &ZBytes) -> Result<$t, Self::Error> { use std::io::Read; let mut r = v.reader(); @@ -746,18 +746,18 @@ macro_rules! impl_int { } } - impl TryFrom for $t { + impl TryFrom for $t { type Error = ZDeserializeError; - fn try_from(value: Payload) -> Result { + fn try_from(value: ZBytes) -> Result { ZSerde.deserialize(&value) } } - impl TryFrom<&Payload> for $t { + impl TryFrom<&ZBytes> for $t { type Error = ZDeserializeError; - fn try_from(value: &Payload) -> Result { + fn try_from(value: &ZBytes) -> Result { ZSerde.deserialize(value) } } @@ -784,30 +784,30 @@ impl_int!(f64); // Zenoh bool impl Serialize for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: bool) -> Self::Output { // SAFETY: casting a bool into an integer is well-defined behaviour. 
// 0 is false, 1 is true: https://doc.rust-lang.org/std/primitive.bool.html - Payload::new(ZBuf::from((t as u8).to_le_bytes())) + ZBytes::new(ZBuf::from((t as u8).to_le_bytes())) } } -impl From for Payload { +impl From for ZBytes { fn from(t: bool) -> Self { ZSerde.serialize(t) } } impl Serialize<&bool> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: &bool) -> Self::Output { ZSerde.serialize(*t) } } -impl From<&bool> for Payload { +impl From<&bool> for ZBytes { fn from(t: &bool) -> Self { ZSerde.serialize(t) } @@ -816,7 +816,7 @@ impl From<&bool> for Payload { impl Deserialize<'_, bool> for ZSerde { type Error = ZDeserializeError; - fn deserialize(self, v: &Payload) -> Result { + fn deserialize(self, v: &ZBytes) -> Result { let p = v.deserialize::().map_err(|_| ZDeserializeError)?; match p { 0 => Ok(false), @@ -826,18 +826,18 @@ impl Deserialize<'_, bool> for ZSerde { } } -impl TryFrom for bool { +impl TryFrom for bool { type Error = ZDeserializeError; - fn try_from(value: Payload) -> Result { + fn try_from(value: ZBytes) -> Result { ZSerde.deserialize(&value) } } -impl TryFrom<&Payload> for bool { +impl TryFrom<&ZBytes> for bool { type Error = ZDeserializeError; - fn try_from(value: &Payload) -> Result { + fn try_from(value: &ZBytes) -> Result { ZSerde.deserialize(value) } } @@ -845,28 +845,28 @@ impl TryFrom<&Payload> for bool { // - Zenoh advanced types encoders/decoders // Properties impl Serialize> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: Properties<'_>) -> Self::Output { Self.serialize(t.as_str()) } } -impl From> for Payload { +impl From> for ZBytes { fn from(t: Properties<'_>) -> Self { ZSerde.serialize(t) } } impl Serialize<&Properties<'_>> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: &Properties<'_>) -> Self::Output { Self.serialize(t.as_str()) } } -impl<'s> From<&'s Properties<'s>> for Payload { +impl<'s> From<&'s Properties<'s>> for ZBytes { fn from(t: &'s Properties<'s>) -> Self { ZSerde.serialize(t) } @@ -875,7 +875,7 @@ impl<'s> From<&'s Properties<'s>> for Payload { impl<'s> Deserialize<'s, Properties<'s>> for ZSerde { type Error = ZDeserializeError; - fn deserialize(self, v: &'s Payload) -> Result, Self::Error> { + fn deserialize(self, v: &'s ZBytes) -> Result, Self::Error> { let s = v .deserialize::>() .map_err(|_| ZDeserializeError)?; @@ -883,33 +883,33 @@ impl<'s> Deserialize<'s, Properties<'s>> for ZSerde { } } -impl TryFrom for Properties<'static> { +impl TryFrom for Properties<'static> { type Error = ZDeserializeError; - fn try_from(v: Payload) -> Result { + fn try_from(v: ZBytes) -> Result { let s = v.deserialize::>().map_err(|_| ZDeserializeError)?; Ok(Properties::from(s.into_owned())) } } -impl<'s> TryFrom<&'s Payload> for Properties<'s> { +impl<'s> TryFrom<&'s ZBytes> for Properties<'s> { type Error = ZDeserializeError; - fn try_from(value: &'s Payload) -> Result { + fn try_from(value: &'s ZBytes) -> Result { ZSerde.deserialize(value) } } // JSON impl Serialize for ZSerde { - type Output = Result; + type Output = Result; fn serialize(self, t: serde_json::Value) -> Self::Output { ZSerde.serialize(&t) } } -impl TryFrom for Payload { +impl TryFrom for ZBytes { type Error = serde_json::Error; fn try_from(value: serde_json::Value) -> Result { @@ -918,16 +918,16 @@ impl TryFrom for Payload { } impl Serialize<&serde_json::Value> for ZSerde { - type Output = Result; + type Output = Result; fn serialize(self, t: &serde_json::Value) -> Self::Output { - let 
mut payload = Payload::empty(); + let mut payload = ZBytes::empty(); serde_json::to_writer(payload.writer(), t)?; Ok(payload) } } -impl TryFrom<&serde_json::Value> for Payload { +impl TryFrom<&serde_json::Value> for ZBytes { type Error = serde_json::Error; fn try_from(value: &serde_json::Value) -> Result { @@ -938,37 +938,37 @@ impl TryFrom<&serde_json::Value> for Payload { impl Deserialize<'_, serde_json::Value> for ZSerde { type Error = serde_json::Error; - fn deserialize(self, v: &Payload) -> Result { + fn deserialize(self, v: &ZBytes) -> Result { serde_json::from_reader(v.reader()) } } -impl TryFrom for serde_json::Value { +impl TryFrom for serde_json::Value { type Error = serde_json::Error; - fn try_from(value: Payload) -> Result { + fn try_from(value: ZBytes) -> Result { ZSerde.deserialize(&value) } } -impl TryFrom<&Payload> for serde_json::Value { +impl TryFrom<&ZBytes> for serde_json::Value { type Error = serde_json::Error; - fn try_from(value: &Payload) -> Result { + fn try_from(value: &ZBytes) -> Result { ZSerde.deserialize(value) } } // Yaml impl Serialize for ZSerde { - type Output = Result; + type Output = Result; fn serialize(self, t: serde_yaml::Value) -> Self::Output { Self.serialize(&t) } } -impl TryFrom for Payload { +impl TryFrom for ZBytes { type Error = serde_yaml::Error; fn try_from(value: serde_yaml::Value) -> Result { @@ -977,16 +977,16 @@ impl TryFrom for Payload { } impl Serialize<&serde_yaml::Value> for ZSerde { - type Output = Result; + type Output = Result; fn serialize(self, t: &serde_yaml::Value) -> Self::Output { - let mut payload = Payload::empty(); + let mut payload = ZBytes::empty(); serde_yaml::to_writer(payload.writer(), t)?; Ok(payload) } } -impl TryFrom<&serde_yaml::Value> for Payload { +impl TryFrom<&serde_yaml::Value> for ZBytes { type Error = serde_yaml::Error; fn try_from(value: &serde_yaml::Value) -> Result { @@ -997,37 +997,37 @@ impl TryFrom<&serde_yaml::Value> for Payload { impl Deserialize<'_, serde_yaml::Value> for ZSerde { type Error = serde_yaml::Error; - fn deserialize(self, v: &Payload) -> Result { + fn deserialize(self, v: &ZBytes) -> Result { serde_yaml::from_reader(v.reader()) } } -impl TryFrom for serde_yaml::Value { +impl TryFrom for serde_yaml::Value { type Error = serde_yaml::Error; - fn try_from(value: Payload) -> Result { + fn try_from(value: ZBytes) -> Result { ZSerde.deserialize(&value) } } -impl TryFrom<&Payload> for serde_yaml::Value { +impl TryFrom<&ZBytes> for serde_yaml::Value { type Error = serde_yaml::Error; - fn try_from(value: &Payload) -> Result { + fn try_from(value: &ZBytes) -> Result { ZSerde.deserialize(value) } } // CBOR impl Serialize for ZSerde { - type Output = Result; + type Output = Result; fn serialize(self, t: serde_cbor::Value) -> Self::Output { Self.serialize(&t) } } -impl TryFrom for Payload { +impl TryFrom for ZBytes { type Error = serde_cbor::Error; fn try_from(value: serde_cbor::Value) -> Result { @@ -1036,16 +1036,16 @@ impl TryFrom for Payload { } impl Serialize<&serde_cbor::Value> for ZSerde { - type Output = Result; + type Output = Result; fn serialize(self, t: &serde_cbor::Value) -> Self::Output { - let mut payload = Payload::empty(); + let mut payload = ZBytes::empty(); serde_cbor::to_writer(payload.0.writer(), t)?; Ok(payload) } } -impl TryFrom<&serde_cbor::Value> for Payload { +impl TryFrom<&serde_cbor::Value> for ZBytes { type Error = serde_cbor::Error; fn try_from(value: &serde_cbor::Value) -> Result { @@ -1056,37 +1056,37 @@ impl TryFrom<&serde_cbor::Value> for Payload { impl 
Deserialize<'_, serde_cbor::Value> for ZSerde { type Error = serde_cbor::Error; - fn deserialize(self, v: &Payload) -> Result { + fn deserialize(self, v: &ZBytes) -> Result { serde_cbor::from_reader(v.reader()) } } -impl TryFrom for serde_cbor::Value { +impl TryFrom for serde_cbor::Value { type Error = serde_cbor::Error; - fn try_from(value: Payload) -> Result { + fn try_from(value: ZBytes) -> Result { ZSerde.deserialize(&value) } } -impl TryFrom<&Payload> for serde_cbor::Value { +impl TryFrom<&ZBytes> for serde_cbor::Value { type Error = serde_cbor::Error; - fn try_from(value: &Payload) -> Result { + fn try_from(value: &ZBytes) -> Result { ZSerde.deserialize(value) } } // Pickle impl Serialize for ZSerde { - type Output = Result; + type Output = Result; fn serialize(self, t: serde_pickle::Value) -> Self::Output { Self.serialize(&t) } } -impl TryFrom for Payload { +impl TryFrom for ZBytes { type Error = serde_pickle::Error; fn try_from(value: serde_pickle::Value) -> Result { @@ -1095,10 +1095,10 @@ impl TryFrom for Payload { } impl Serialize<&serde_pickle::Value> for ZSerde { - type Output = Result; + type Output = Result; fn serialize(self, t: &serde_pickle::Value) -> Self::Output { - let mut payload = Payload::empty(); + let mut payload = ZBytes::empty(); serde_pickle::value_to_writer( &mut payload.0.writer(), t, @@ -1108,7 +1108,7 @@ impl Serialize<&serde_pickle::Value> for ZSerde { } } -impl TryFrom<&serde_pickle::Value> for Payload { +impl TryFrom<&serde_pickle::Value> for ZBytes { type Error = serde_pickle::Error; fn try_from(value: &serde_pickle::Value) -> Result { @@ -1119,23 +1119,23 @@ impl TryFrom<&serde_pickle::Value> for Payload { impl Deserialize<'_, serde_pickle::Value> for ZSerde { type Error = serde_pickle::Error; - fn deserialize(self, v: &Payload) -> Result { + fn deserialize(self, v: &ZBytes) -> Result { serde_pickle::value_from_reader(v.reader(), serde_pickle::DeOptions::default()) } } -impl TryFrom for serde_pickle::Value { +impl TryFrom for serde_pickle::Value { type Error = serde_pickle::Error; - fn try_from(value: Payload) -> Result { + fn try_from(value: ZBytes) -> Result { ZSerde.deserialize(&value) } } -impl TryFrom<&Payload> for serde_pickle::Value { +impl TryFrom<&ZBytes> for serde_pickle::Value { type Error = serde_pickle::Error; - fn try_from(value: &Payload) -> Result { + fn try_from(value: &ZBytes) -> Result { ZSerde.deserialize(value) } } @@ -1143,14 +1143,14 @@ impl TryFrom<&Payload> for serde_pickle::Value { // Shared memory conversion #[cfg(feature = "shared-memory")] impl Serialize> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: Arc) -> Self::Output { - Payload::new(t) + ZBytes::new(t) } } #[cfg(feature = "shared-memory")] -impl From> for Payload { +impl From> for ZBytes { fn from(t: Arc) -> Self { ZSerde.serialize(t) } @@ -1158,7 +1158,7 @@ impl From> for Payload { #[cfg(feature = "shared-memory")] impl Serialize> for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: Box) -> Self::Output { let smb: Arc = t.into(); @@ -1167,7 +1167,7 @@ impl Serialize> for ZSerde { } #[cfg(feature = "shared-memory")] -impl From> for Payload { +impl From> for ZBytes { fn from(t: Box) -> Self { ZSerde.serialize(t) } @@ -1175,15 +1175,15 @@ impl From> for Payload { #[cfg(feature = "shared-memory")] impl Serialize for ZSerde { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: SharedMemoryBuf) -> Self::Output { - Payload::new(t) + ZBytes::new(t) } } #[cfg(feature = "shared-memory")] 
-impl From for Payload { +impl From for ZBytes { fn from(t: SharedMemoryBuf) -> Self { ZSerde.serialize(t) } @@ -1193,7 +1193,7 @@ impl From for Payload { impl Deserialize<'_, SharedMemoryBuf> for ZSerde { type Error = ZDeserializeError; - fn deserialize(self, v: &Payload) -> Result { + fn deserialize(self, v: &ZBytes) -> Result { // A SharedMemoryBuf is expected to have only one slice let mut zslices = v.0.zslices(); if let Some(zs) = zslices.next() { @@ -1206,10 +1206,10 @@ impl Deserialize<'_, SharedMemoryBuf> for ZSerde { } #[cfg(feature = "shared-memory")] -impl TryFrom for SharedMemoryBuf { +impl TryFrom for SharedMemoryBuf { type Error = ZDeserializeError; - fn try_from(value: Payload) -> Result { + fn try_from(value: ZBytes) -> Result { ZSerde.deserialize(&value) } } @@ -1222,8 +1222,8 @@ macro_rules! impl_tuple { let codec = Zenoh080::new(); let mut buffer: ZBuf = ZBuf::empty(); let mut writer = buffer.writer(); - let apld: Payload = a.into(); - let bpld: Payload = b.into(); + let apld: ZBytes = a.into(); + let bpld: ZBytes = b.into(); // SAFETY: we are serializing slices on a ZBuf, so serialization will never // fail unless we run out of memory. In that case, Rust memory allocator @@ -1233,15 +1233,15 @@ macro_rules! impl_tuple { codec.write(&mut writer, &bpld.0).unwrap_unchecked(); } - Payload::new(buffer) + ZBytes::new(buffer) }}; } impl Serialize<(A, B)> for ZSerde where - A: Into, - B: Into, + A: Into, + B: Into, { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: (A, B)) -> Self::Output { impl_tuple!(t) @@ -1250,20 +1250,20 @@ where impl Serialize<&(A, B)> for ZSerde where - for<'a> &'a A: Into, - for<'b> &'b B: Into, + for<'a> &'a A: Into, + for<'b> &'b B: Into, { - type Output = Payload; + type Output = ZBytes; fn serialize(self, t: &(A, B)) -> Self::Output { impl_tuple!(t) } } -impl From<(A, B)> for Payload +impl From<(A, B)> for ZBytes where - A: Into, - B: Into, + A: Into, + B: Into, { fn from(value: (A, B)) -> Self { ZSerde.serialize(value) @@ -1272,22 +1272,22 @@ where impl Deserialize<'_, (A, B)> for ZSerde where - for<'a> A: TryFrom<&'a Payload>, - for<'a> >::Error: Debug, - for<'b> B: TryFrom<&'b Payload>, - for<'b> >::Error: Debug, + for<'a> A: TryFrom<&'a ZBytes>, + for<'a> >::Error: Debug, + for<'b> B: TryFrom<&'b ZBytes>, + for<'b> >::Error: Debug, { type Error = ZError; - fn deserialize(self, payload: &Payload) -> Result<(A, B), Self::Error> { + fn deserialize(self, payload: &ZBytes) -> Result<(A, B), Self::Error> { let codec = Zenoh080::new(); let mut reader = payload.0.reader(); let abuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; - let apld = Payload::new(abuf); + let apld = ZBytes::new(abuf); let bbuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; - let bpld = Payload::new(bbuf); + let bpld = ZBytes::new(bbuf); let a = A::try_from(&apld).map_err(|e| zerror!("{:?}", e))?; let b = B::try_from(&bpld).map_err(|e| zerror!("{:?}", e))?; @@ -1295,30 +1295,30 @@ where } } -impl TryFrom for (A, B) +impl TryFrom for (A, B) where - A: for<'a> TryFrom<&'a Payload>, - for<'a> >::Error: Debug, - for<'b> B: TryFrom<&'b Payload>, - for<'b> >::Error: Debug, + A: for<'a> TryFrom<&'a ZBytes>, + for<'a> >::Error: Debug, + for<'b> B: TryFrom<&'b ZBytes>, + for<'b> >::Error: Debug, { type Error = ZError; - fn try_from(value: Payload) -> Result { + fn try_from(value: ZBytes) -> Result { ZSerde.deserialize(&value) } } -impl TryFrom<&Payload> for (A, B) +impl TryFrom<&ZBytes> for (A, B) where - for<'a> A: TryFrom<&'a 
Payload>, - for<'a> >::Error: Debug, - for<'b> B: TryFrom<&'b Payload>, - for<'b> >::Error: Debug, + for<'a> A: TryFrom<&'a ZBytes>, + for<'a> >::Error: Debug, + for<'b> B: TryFrom<&'b ZBytes>, + for<'b> >::Error: Debug, { type Error = ZError; - fn try_from(value: &Payload) -> Result { + fn try_from(value: &ZBytes) -> Result { ZSerde.deserialize(value) } } @@ -1354,8 +1354,8 @@ impl std::fmt::Display for StringOrBase64 { } } -impl From<&Payload> for StringOrBase64 { - fn from(v: &Payload) -> Self { +impl From<&ZBytes> for StringOrBase64 { + fn from(v: &ZBytes) -> Self { use base64::{engine::general_purpose::STANDARD as b64_std_engine, Engine}; match v.deserialize::() { Ok(s) => StringOrBase64::String(s), @@ -1367,7 +1367,7 @@ impl From<&Payload> for StringOrBase64 { mod tests { #[test] fn serializer() { - use super::Payload; + use super::ZBytes; use rand::Rng; use std::borrow::Cow; use zenoh_buffers::{ZBuf, ZSlice}; @@ -1380,7 +1380,7 @@ mod tests { let i = $in; let t = i.clone(); println!("Serialize:\t{:?}", t); - let v = Payload::serialize(t); + let v = ZBytes::serialize(t); println!("Deserialize:\t{:?}", v); let o: $t = v.deserialize().unwrap(); assert_eq!(i, o); @@ -1476,7 +1476,7 @@ mod tests { // Iterator let v: [usize; 5] = [0, 1, 2, 3, 4]; println!("Serialize:\t{:?}", v); - let p = Payload::from_iter(v.iter()); + let p = ZBytes::from_iter(v.iter()); println!("Deserialize:\t{:?}\n", p); for (i, t) in p.iter::().enumerate() { assert_eq!(i, t); @@ -1484,7 +1484,7 @@ mod tests { let mut v = vec![[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]; println!("Serialize:\t{:?}", v); - let p = Payload::from_iter(v.drain(..)); + let p = ZBytes::from_iter(v.drain(..)); println!("Deserialize:\t{:?}\n", p); let mut iter = p.iter::<[u8; 4]>(); assert_eq!(iter.next().unwrap(), [0, 1, 2, 3]); @@ -1498,7 +1498,7 @@ mod tests { hm.insert(0, 0); hm.insert(1, 1); println!("Serialize:\t{:?}", hm); - let p = Payload::from_iter(hm.clone().drain()); + let p = ZBytes::from_iter(hm.clone().drain()); println!("Deserialize:\t{:?}\n", p); let o = HashMap::from_iter(p.iter::<(usize, usize)>()); assert_eq!(hm, o); @@ -1507,7 +1507,7 @@ mod tests { hm.insert(0, vec![0u8; 8]); hm.insert(1, vec![1u8; 16]); println!("Serialize:\t{:?}", hm); - let p = Payload::from_iter(hm.clone().drain()); + let p = ZBytes::from_iter(hm.clone().drain()); println!("Deserialize:\t{:?}\n", p); let o = HashMap::from_iter(p.iter::<(usize, Vec)>()); assert_eq!(hm, o); @@ -1516,7 +1516,7 @@ mod tests { hm.insert(0, vec![0u8; 8]); hm.insert(1, vec![1u8; 16]); println!("Serialize:\t{:?}", hm); - let p = Payload::from_iter(hm.clone().drain()); + let p = ZBytes::from_iter(hm.clone().drain()); println!("Deserialize:\t{:?}\n", p); let o = HashMap::from_iter(p.iter::<(usize, Vec)>()); assert_eq!(hm, o); @@ -1525,7 +1525,7 @@ mod tests { hm.insert(0, ZSlice::from(vec![0u8; 8])); hm.insert(1, ZSlice::from(vec![1u8; 16])); println!("Serialize:\t{:?}", hm); - let p = Payload::from_iter(hm.clone().drain()); + let p = ZBytes::from_iter(hm.clone().drain()); println!("Deserialize:\t{:?}\n", p); let o = HashMap::from_iter(p.iter::<(usize, ZSlice)>()); assert_eq!(hm, o); @@ -1534,7 +1534,7 @@ mod tests { hm.insert(0, ZBuf::from(vec![0u8; 8])); hm.insert(1, ZBuf::from(vec![1u8; 16])); println!("Serialize:\t{:?}", hm); - let p = Payload::from_iter(hm.clone().drain()); + let p = ZBytes::from_iter(hm.clone().drain()); println!("Deserialize:\t{:?}\n", p); let o = HashMap::from_iter(p.iter::<(usize, ZBuf)>()); assert_eq!(hm, o); @@ -1543,7 +1543,7 @@ 
mod tests { hm.insert(0, vec![0u8; 8]); hm.insert(1, vec![1u8; 16]); println!("Serialize:\t{:?}", hm); - let p = Payload::from_iter(hm.clone().iter().map(|(k, v)| (k, Cow::from(v)))); + let p = ZBytes::from_iter(hm.clone().iter().map(|(k, v)| (k, Cow::from(v)))); println!("Deserialize:\t{:?}\n", p); let o = HashMap::from_iter(p.iter::<(usize, Vec)>()); assert_eq!(hm, o); @@ -1552,7 +1552,7 @@ mod tests { hm.insert(String::from("0"), String::from("a")); hm.insert(String::from("1"), String::from("b")); println!("Serialize:\t{:?}", hm); - let p = Payload::from_iter(hm.iter()); + let p = ZBytes::from_iter(hm.iter()); println!("Deserialize:\t{:?}\n", p); let o = HashMap::from_iter(p.iter::<(String, String)>()); assert_eq!(hm, o); diff --git a/zenoh/src/encoding.rs b/zenoh/src/encoding.rs index d9fa725ed5..81dfb04752 100644 --- a/zenoh/src/encoding.rs +++ b/zenoh/src/encoding.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::payload::Payload; +use crate::bytes::ZBytes; use phf::phf_map; use std::{borrow::Cow, convert::Infallible, fmt, str::FromStr}; use zenoh_buffers::{ZBuf, ZSlice}; @@ -727,7 +727,7 @@ pub trait EncodingMapping { } // Bytes -impl EncodingMapping for Payload { +impl EncodingMapping for ZBytes { const ENCODING: Encoding = Encoding::ZENOH_BYTES; } diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 8de143fd8d..7e25375d64 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -134,12 +134,12 @@ pub use net::runtime; pub mod selector; #[deprecated = "This module is now a separate crate. Use the crate directly for shorter compile-times"] pub use zenoh_config as config; +pub mod bytes; pub(crate) mod encoding; pub mod handlers; pub mod info; #[cfg(feature = "unstable")] pub mod liveliness; -pub mod payload; pub mod plugins; pub mod prelude; pub mod publication; diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 09009cabd7..c7e951a963 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -12,10 +12,10 @@ // ZettaScale Zenoh Team, use super::routing::dispatcher::face::Face; use super::Runtime; +use crate::bytes::ZBytes; use crate::encoding::Encoding; use crate::key_expr::KeyExpr; use crate::net::primitives::Primitives; -use crate::payload::Payload; use crate::plugins::sealed::{self as plugins}; use crate::prelude::sync::SyncResolve; use crate::queryable::Query; @@ -580,7 +580,7 @@ fn router_data(context: &AdminContext, query: Query) { } tracing::trace!("AdminSpace router_data: {:?}", json); - let payload = match Payload::try_from(json) { + let payload = match ZBytes::try_from(json) { Ok(p) => p, Err(e) => { tracing::error!("Error serializing AdminSpace reply: {:?}", e); @@ -664,7 +664,7 @@ fn subscribers_data(context: &AdminContext, query: Query) { )) .unwrap(); if query.key_expr().intersects(&key) { - if let Err(e) = query.reply(key, Payload::empty()).res() { + if let Err(e) = query.reply(key, ZBytes::empty()).res() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -681,7 +681,7 @@ fn queryables_data(context: &AdminContext, query: Query) { )) .unwrap(); if query.key_expr().intersects(&key) { - if let Err(e) = query.reply(key, Payload::empty()).res() { + if let Err(e) = query.reply(key, ZBytes::empty()).res() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -699,7 +699,7 @@ fn plugins_data(context: &AdminContext, query: Query) { tracing::debug!("plugin status: {:?}", status); let key = root_key.join(status.name()).unwrap(); let status 
= serde_json::to_value(status).unwrap(); - match Payload::try_from(status) { + match ZBytes::try_from(status) { Ok(zbuf) => { if let Err(e) = query.reply(key, zbuf).res_sync() { tracing::error!("Error sending AdminSpace reply: {:?}", e); @@ -744,7 +744,7 @@ fn plugins_status(context: &AdminContext, query: Query) { Ok(Ok(responses)) => { for response in responses { if let Ok(key_expr) = KeyExpr::try_from(response.key) { - match Payload::try_from(response.value) { + match ZBytes::try_from(response.value) { Ok(zbuf) => { if let Err(e) = query.reply(key_expr, zbuf).res_sync() { tracing::error!("Error sending AdminSpace reply: {:?}", e); diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 9158425034..2bb14fe2a4 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -43,9 +43,9 @@ pub(crate) mod common { pub use crate::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; - pub use crate::encoding::Encoding; /// The encoding of a zenoh `Value`. - pub use crate::payload::{Deserialize, Payload, Serialize}; + pub use crate::bytes::{Deserialize, Serialize, ZBytes}; + pub use crate::encoding::Encoding; pub use crate::value::Value; #[zenoh_macros::unstable] diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 303f120360..8dd5883f0f 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -20,8 +20,8 @@ use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] use crate::{ + bytes::OptionZBytes, handlers::{Callback, DefaultHandler, IntoHandler}, - payload::OptionPayload, sample::Attachment, Id, }; @@ -40,7 +40,7 @@ pub use zenoh_protocol::core::CongestionControl; #[derive(Debug, Clone)] pub struct PublicationBuilderPut { - pub(crate) payload: Payload, + pub(crate) payload: ZBytes, pub(crate) encoding: Encoding, } #[derive(Debug, Clone)] @@ -138,7 +138,7 @@ impl
ValueBuilderTrait for PublicationBuilder { fn payload(self, payload: IntoPayload) -> Self where - IntoPayload: Into, + IntoPayload: Into, { Self { kind: PublicationBuilderPut { @@ -167,8 +167,8 @@ impl SampleBuilderTrait for PublicationBuilder { } } #[cfg(feature = "unstable")] - fn attachment>(self, attachment: TA) -> Self { - let attachment: OptionPayload = attachment.into(); + fn attachment>(self, attachment: TA) -> Self { + let attachment: OptionZBytes = attachment.into(); Self { attachment: attachment.into(), ..self @@ -213,7 +213,7 @@ impl SyncResolve for PublicationBuilder, PublicationBui let publisher = self.publisher.create_one_shot_publisher()?; resolve_put( &publisher, - Payload::empty(), + ZBytes::empty(), SampleKind::Delete, Encoding::ZENOH_BYTES, self.timestamp, @@ -421,7 +421,7 @@ impl<'a> Publisher<'a> { #[inline] pub fn put(&self, payload: IntoPayload) -> PublisherPutBuilder<'_> where - IntoPayload: Into, + IntoPayload: Into, { PublicationBuilder { publisher: self, @@ -708,7 +708,7 @@ impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete fn res_sync(self) -> ::To { resolve_put( self.publisher, - Payload::empty(), + ZBytes::empty(), SampleKind::Delete, Encoding::ZENOH_BYTES, self.timestamp, @@ -941,7 +941,7 @@ impl<'a, 'b> AsyncResolve for PublisherBuilder<'a, 'b> { fn resolve_put( publisher: &Publisher<'_>, - payload: Payload, + payload: ZBytes, kind: SampleKind, encoding: Encoding, timestamp: Option, diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index becfad4922..a0c2e3cfbb 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -18,7 +18,7 @@ use crate::prelude::*; use crate::sample::QoSBuilder; use crate::Session; #[cfg(feature = "unstable")] -use crate::{payload::OptionPayload, sample::Attachment}; +use crate::{bytes::OptionZBytes, sample::Attachment}; use std::collections::HashMap; use std::future::Ready; use std::time::Duration; @@ -165,8 +165,8 @@ impl SampleBuilderTrait for GetBuilder<'_, '_, Handler> { } #[cfg(feature = "unstable")] - fn attachment>(self, attachment: T) -> Self { - let attachment: OptionPayload = attachment.into(); + fn attachment>(self, attachment: T) -> Self { + let attachment: OptionZBytes = attachment.into(); Self { attachment: attachment.into(), ..self @@ -201,7 +201,7 @@ impl ValueBuilderTrait for GetBuilder<'_, '_, Handler> { } } - fn payload>(self, payload: T) -> Self { + fn payload>(self, payload: T) -> Self { let mut value = self.value.unwrap_or_default(); value.payload = payload.into(); Self { diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 755e0364af..8147eb2885 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -26,7 +26,7 @@ use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] use crate::{ - payload::OptionPayload, + bytes::OptionZBytes, query::ReplyKeyExpr, sample::{Attachment, SourceInfo}, }; @@ -105,7 +105,7 @@ impl Query { /// This Query's payload. 
#[inline(always)] - pub fn payload(&self) -> Option<&Payload> { + pub fn payload(&self) -> Option<&ZBytes> { self.inner.value.as_ref().map(|v| &v.payload) } @@ -150,7 +150,7 @@ impl Query { where TryIntoKeyExpr: TryInto>, >>::Error: Into, - IntoPayload: Into, + IntoPayload: Into, { ReplyBuilder { query: self, @@ -275,7 +275,7 @@ impl AsyncResolve for ReplySample<'_> { #[derive(Debug)] pub struct ReplyBuilderPut { - payload: super::Payload, + payload: super::ZBytes, encoding: super::Encoding, } #[derive(Debug)] @@ -314,8 +314,8 @@ impl TimestampBuilderTrait for ReplyBuilder<'_, '_, T> { #[cfg(feature = "unstable")] impl SampleBuilderTrait for ReplyBuilder<'_, '_, T> { #[cfg(feature = "unstable")] - fn attachment>(self, attachment: U) -> Self { - let attachment: OptionPayload = attachment.into(); + fn attachment>(self, attachment: U) -> Self { + let attachment: OptionZBytes = attachment.into(); Self { attachment: attachment.into(), ..self @@ -359,7 +359,7 @@ impl ValueBuilderTrait for ReplyBuilder<'_, '_, ReplyBuilderPut> { } } - fn payload>(self, payload: T) -> Self { + fn payload>(self, payload: T) -> Self { Self { kind: ReplyBuilderPut { payload: payload.into(), @@ -501,7 +501,7 @@ impl ValueBuilderTrait for ReplyErrBuilder<'_> { Self { value, ..self } } - fn payload>(self, payload: T) -> Self { + fn payload>(self, payload: T) -> Self { let mut value = self.value.clone(); value.payload = payload.into(); Self { value, ..self } diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index 6dc85c4046..cab5c2333a 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -14,13 +14,13 @@ use crate::sample::{QoS, QoSBuilder}; use crate::Encoding; use crate::KeyExpr; -use crate::Payload; use crate::Priority; use crate::Sample; use crate::SampleKind; use crate::Value; +use crate::ZBytes; #[cfg(feature = "unstable")] -use crate::{payload::OptionPayload, sample::SourceInfo}; +use crate::{bytes::OptionZBytes, sample::SourceInfo}; use std::marker::PhantomData; use uhlc::Timestamp; use zenoh_core::zresult; @@ -49,14 +49,14 @@ pub trait SampleBuilderTrait { fn source_info(self, source_info: SourceInfo) -> Self; /// Attach user-provided data in key-value format #[zenoh_macros::unstable] - fn attachment>(self, attachment: T) -> Self; + fn attachment>(self, attachment: T) -> Self; } pub trait ValueBuilderTrait { /// Set the [`Encoding`] fn encoding>(self, encoding: T) -> Self; /// Sets the payload - fn payload>(self, payload: T) -> Self; + fn payload>(self, payload: T) -> Self; /// Sets both payload and encoding at once. 
/// This is convenient for passing user type which supports `Into` when both payload and encoding depends on user type fn value>(self, value: T) -> Self; @@ -82,7 +82,7 @@ impl SampleBuilder { ) -> SampleBuilder where IntoKeyExpr: Into>, - IntoPayload: Into, + IntoPayload: Into, { Self { sample: Sample { @@ -110,7 +110,7 @@ impl SampleBuilder { Self { sample: Sample { key_expr: key_expr.into(), - payload: Payload::empty(), + payload: ZBytes::empty(), kind: SampleKind::Delete, encoding: Encoding::default(), timestamp: None, @@ -175,8 +175,8 @@ impl SampleBuilderTrait for SampleBuilder { } #[zenoh_macros::unstable] - fn attachment>(self, attachment: U) -> Self { - let attachment: OptionPayload = attachment.into(); + fn attachment>(self, attachment: U) -> Self { + let attachment: OptionZBytes = attachment.into(); Self { sample: Sample { attachment: attachment.into(), @@ -224,7 +224,7 @@ impl ValueBuilderTrait for SampleBuilder { _t: PhantomData::, } } - fn payload>(self, payload: T) -> Self { + fn payload>(self, payload: T) -> Self { Self { sample: Sample { payload: payload.into(), diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/sample/mod.rs index b8fc62be57..b1093847bb 100644 --- a/zenoh/src/sample/mod.rs +++ b/zenoh/src/sample/mod.rs @@ -13,8 +13,8 @@ // //! Sample primitives +use crate::bytes::ZBytes; use crate::encoding::Encoding; -use crate::payload::Payload; use crate::prelude::{KeyExpr, Value}; use crate::sample::builder::QoSBuilderTrait; use crate::time::Timestamp; @@ -67,7 +67,7 @@ pub(crate) trait DataInfoIntoSample { ) -> Sample where IntoKeyExpr: Into>, - IntoPayload: Into; + IntoPayload: Into; } impl DataInfoIntoSample for DataInfo { @@ -84,7 +84,7 @@ impl DataInfoIntoSample for DataInfo { ) -> Sample where IntoKeyExpr: Into>, - IntoPayload: Into, + IntoPayload: Into, { Sample { key_expr: key_expr.into(), @@ -114,7 +114,7 @@ impl DataInfoIntoSample for Option { ) -> Sample where IntoKeyExpr: Into>, - IntoPayload: Into, + IntoPayload: Into, { if let Some(data_info) = self { data_info.into_sample( @@ -213,12 +213,12 @@ impl From> for SourceInfo { mod attachment { #[cfg(feature = "unstable")] - use crate::payload::Payload; + use crate::bytes::ZBytes; #[cfg(feature = "unstable")] use zenoh_protocol::zenoh::ext::AttachmentType; #[zenoh_macros::unstable] - pub type Attachment = Payload; + pub type Attachment = ZBytes; #[zenoh_macros::unstable] impl From for AttachmentType { @@ -274,7 +274,7 @@ pub use attachment::Attachment; /// Structure with public fields for sample. It's convenient if it's necessary to decompose a sample into its fields. pub struct SampleFields { pub key_expr: KeyExpr<'static>, - pub payload: Payload, + pub payload: ZBytes, pub kind: SampleKind, pub encoding: Encoding, pub timestamp: Option, @@ -311,7 +311,7 @@ impl From for SampleFields { #[derive(Clone, Debug)] pub struct Sample { pub(crate) key_expr: KeyExpr<'static>, - pub(crate) payload: Payload, + pub(crate) payload: ZBytes, pub(crate) kind: SampleKind, pub(crate) encoding: Encoding, pub(crate) timestamp: Option, @@ -333,7 +333,7 @@ impl Sample { /// Gets the payload of this Sample. 
#[inline] - pub fn payload(&self) -> &Payload { + pub fn payload(&self) -> &ZBytes { &self.payload } diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 6f047fda5d..b075de042b 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use crate::admin; +use crate::bytes::ZBytes; use crate::config::Config; use crate::config::Notifier; use crate::encoding::Encoding; @@ -23,7 +24,6 @@ use crate::liveliness::{Liveliness, LivelinessTokenState}; use crate::net::primitives::Primitives; use crate::net::routing::dispatcher::face::Face; use crate::net::runtime::Runtime; -use crate::payload::Payload; use crate::prelude::KeyExpr; use crate::prelude::Locality; use crate::publication::*; @@ -715,7 +715,7 @@ impl Session { where TryIntoKeyExpr: TryInto>, >>::Error: Into, - IntoPayload: Into, + IntoPayload: Into, { PublicationBuilder { publisher: self.declare_publisher(key_expr), diff --git a/zenoh/src/value.rs b/zenoh/src/value.rs index d1b582111a..26165334eb 100644 --- a/zenoh/src/value.rs +++ b/zenoh/src/value.rs @@ -13,13 +13,13 @@ // //! Value primitives. -use crate::{encoding::Encoding, payload::Payload}; +use crate::{bytes::ZBytes, encoding::Encoding}; /// A zenoh [`Value`] contains a `payload` and an [`Encoding`] that indicates how the [`Payload`] should be interpreted. #[non_exhaustive] #[derive(Clone, Debug, PartialEq, Eq)] pub struct Value { - pub(crate) payload: Payload, + pub(crate) payload: ZBytes, pub(crate) encoding: Encoding, } @@ -27,7 +27,7 @@ impl Value { /// Creates a new [`Value`] with specified [`Payload`] and [`Encoding`]. pub fn new(payload: T, encoding: E) -> Self where - T: Into, + T: Into, E: Into, { Value { @@ -38,7 +38,7 @@ impl Value { /// Creates an empty [`Value`]. pub const fn empty() -> Self { Value { - payload: Payload::empty(), + payload: ZBytes::empty(), encoding: Encoding::default(), } } @@ -49,7 +49,7 @@ impl Value { } /// Gets binary [`Payload`] of this [`Value`]. 
- pub fn payload(&self) -> &Payload { + pub fn payload(&self) -> &ZBytes { &self.payload } @@ -61,7 +61,7 @@ impl Value { impl From for Value where - T: Into, + T: Into, { fn from(t: T) -> Self { Value { From e0d7784a7467481c921da46581bdf14d077144f0 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 17 Apr 2024 10:27:27 +0200 Subject: [PATCH 257/357] Remove Attachment in favor of ZBYtes --- zenoh-ext/src/group.rs | 6 +++--- zenoh/src/bytes.rs | 16 ++++++++++++++- zenoh/src/publication.rs | 7 +++---- zenoh/src/query.rs | 6 +++--- zenoh/src/queryable.rs | 10 +++++----- zenoh/src/sample/mod.rs | 41 ++++++-------------------------------- zenoh/src/session.rs | 10 ++++------ zenoh/tests/attachments.rs | 12 +++++------ 8 files changed, 45 insertions(+), 63 deletions(-) diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 83b3c7b199..839623cdb9 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -24,7 +24,7 @@ use std::ops::Add; use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::sync::Mutex; -use zenoh::bytes::PayloadReader; +use zenoh::bytes::ZBytesReader; use zenoh::prelude::r#async::*; use zenoh::publication::Publisher; use zenoh::query::ConsolidationMode; @@ -242,7 +242,7 @@ async fn net_event_handler(z: Arc, state: Arc) { .await .unwrap(); while let Ok(s) = sub.recv_async().await { - match bincode::deserialize_from::(s.payload().reader()) { + match bincode::deserialize_from::(s.payload().reader()) { Ok(evt) => match evt { GroupNetEvent::Join(je) => { tracing::debug!("Member join: {:?}", &je.member); @@ -301,7 +301,7 @@ async fn net_event_handler(z: Arc, state: Arc) { while let Ok(reply) = receiver.recv_async().await { match reply.result() { Ok(sample) => { - match bincode::deserialize_from::( + match bincode::deserialize_from::( sample.payload().reader(), ) { Ok(m) => { diff --git a/zenoh/src/bytes.rs b/zenoh/src/bytes.rs index 91bae8f517..9e2e441f3a 100644 --- a/zenoh/src/bytes.rs +++ b/zenoh/src/bytes.rs @@ -28,7 +28,7 @@ use zenoh_buffers::{ ZBufReader, ZSlice, }; use zenoh_codec::{RCodec, WCodec, Zenoh080}; -use zenoh_protocol::core::Properties; +use zenoh_protocol::{core::Properties, zenoh::ext::AttachmentType}; use zenoh_result::{ZError, ZResult}; #[cfg(feature = "shared-memory")] use zenoh_shm::SharedMemoryBuf; @@ -1364,6 +1364,20 @@ impl From<&ZBytes> for StringOrBase64 { } } +impl From for AttachmentType { + fn from(this: ZBytes) -> Self { + AttachmentType { + buffer: this.into(), + } + } +} + +impl From> for ZBytes { + fn from(this: AttachmentType) -> Self { + this.buffer.into() + } +} + mod tests { #[test] fn serializer() { diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 8dd5883f0f..c59cca8b9e 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -20,9 +20,8 @@ use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] use crate::{ - bytes::OptionZBytes, + bytes::{OptionZBytes, ZBytes}, handlers::{Callback, DefaultHandler, IntoHandler}, - sample::Attachment, Id, }; use std::future::Ready; @@ -76,7 +75,7 @@ pub struct PublicationBuilder { #[cfg(feature = "unstable")] pub(crate) source_info: SourceInfo, #[cfg(feature = "unstable")] - pub(crate) attachment: Option, + pub(crate) attachment: Option, } pub type SessionPutBuilder<'a, 'b> = @@ -946,7 +945,7 @@ fn resolve_put( encoding: Encoding, timestamp: Option, #[cfg(feature = "unstable")] source_info: SourceInfo, - #[cfg(feature = "unstable")] attachment: Option, + #[cfg(feature = "unstable")] attachment: Option, ) -> 
ZResult<()> { tracing::trace!("write({:?}, [...])", &publisher.key_expr); let primitives = zread!(publisher.session.state) diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index a0c2e3cfbb..db7071c278 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -13,12 +13,12 @@ // //! Query primitives. +#[cfg(feature = "unstable")] +use crate::bytes::{OptionZBytes, ZBytes}; use crate::handlers::{locked, Callback, DefaultHandler}; use crate::prelude::*; use crate::sample::QoSBuilder; use crate::Session; -#[cfg(feature = "unstable")] -use crate::{bytes::OptionZBytes, sample::Attachment}; use std::collections::HashMap; use std::future::Ready; use std::time::Duration; @@ -149,7 +149,7 @@ pub struct GetBuilder<'a, 'b, Handler> { pub(crate) handler: Handler, pub(crate) value: Option, #[cfg(feature = "unstable")] - pub(crate) attachment: Option, + pub(crate) attachment: Option, #[cfg(feature = "unstable")] pub(crate) source_info: SourceInfo, } diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 8147eb2885..563df461b8 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -26,9 +26,9 @@ use crate::SessionRef; use crate::Undeclarable; #[cfg(feature = "unstable")] use crate::{ - bytes::OptionZBytes, + bytes::{OptionZBytes, ZBytes}, query::ReplyKeyExpr, - sample::{Attachment, SourceInfo}, + sample::SourceInfo, }; use std::fmt; use std::future::Ready; @@ -55,7 +55,7 @@ pub(crate) struct QueryInner { pub(crate) zid: ZenohId, pub(crate) primitives: Arc, #[cfg(feature = "unstable")] - pub(crate) attachment: Option, + pub(crate) attachment: Option, } impl Drop for QueryInner { @@ -116,7 +116,7 @@ impl Query { } #[zenoh_macros::unstable] - pub fn attachment(&self) -> Option<&Attachment> { + pub fn attachment(&self) -> Option<&ZBytes> { self.inner.attachment.as_ref() } @@ -295,7 +295,7 @@ pub struct ReplyBuilder<'a, 'b, T> { source_info: SourceInfo, #[cfg(feature = "unstable")] - attachment: Option, + attachment: Option, } pub type ReplyPutBuilder<'a, 'b> = ReplyBuilder<'a, 'b, ReplyBuilderPut>; diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/sample/mod.rs index b1093847bb..6078a5a350 100644 --- a/zenoh/src/sample/mod.rs +++ b/zenoh/src/sample/mod.rs @@ -63,7 +63,7 @@ pub(crate) trait DataInfoIntoSample { self, key_expr: IntoKeyExpr, payload: IntoPayload, - #[cfg(feature = "unstable")] attachment: Option, + #[cfg(feature = "unstable")] attachment: Option, ) -> Sample where IntoKeyExpr: Into>, @@ -80,7 +80,7 @@ impl DataInfoIntoSample for DataInfo { self, key_expr: IntoKeyExpr, payload: IntoPayload, - #[cfg(feature = "unstable")] attachment: Option, + #[cfg(feature = "unstable")] attachment: Option, ) -> Sample where IntoKeyExpr: Into>, @@ -110,7 +110,7 @@ impl DataInfoIntoSample for Option { self, key_expr: IntoKeyExpr, payload: IntoPayload, - #[cfg(feature = "unstable")] attachment: Option, + #[cfg(feature = "unstable")] attachment: Option, ) -> Sample where IntoKeyExpr: Into>, @@ -211,32 +211,6 @@ impl From> for SourceInfo { } } -mod attachment { - #[cfg(feature = "unstable")] - use crate::bytes::ZBytes; - #[cfg(feature = "unstable")] - use zenoh_protocol::zenoh::ext::AttachmentType; - - #[zenoh_macros::unstable] - pub type Attachment = ZBytes; - - #[zenoh_macros::unstable] - impl From for AttachmentType { - fn from(this: Attachment) -> Self { - AttachmentType { - buffer: this.into(), - } - } - } - - #[zenoh_macros::unstable] - impl From> for Attachment { - fn from(this: AttachmentType) -> Self { - this.buffer.into() - } - } -} - /// The kind of a `Sample`. 
#[repr(u8)] #[derive(Debug, Default, Copy, Clone, PartialEq, Eq)] @@ -268,9 +242,6 @@ impl TryFrom for SampleKind { } } -#[zenoh_macros::unstable] -pub use attachment::Attachment; - /// Structure with public fields for sample. It's convenient if it's necessary to decompose a sample into its fields. pub struct SampleFields { pub key_expr: KeyExpr<'static>, @@ -284,7 +255,7 @@ pub struct SampleFields { #[cfg(feature = "unstable")] pub source_info: SourceInfo, #[cfg(feature = "unstable")] - pub attachment: Option, + pub attachment: Option, } impl From for SampleFields { @@ -321,7 +292,7 @@ pub struct Sample { pub(crate) source_info: SourceInfo, #[cfg(feature = "unstable")] - pub(crate) attachment: Option, + pub(crate) attachment: Option, } impl Sample { @@ -371,7 +342,7 @@ impl Sample { /// Gets the sample attachment: a map of key-value pairs, where each key and value are byte-slices. #[zenoh_macros::unstable] #[inline] - pub fn attachment(&self) -> Option<&Attachment> { + pub fn attachment(&self) -> Option<&ZBytes> { self.attachment.as_ref() } } diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index b075de042b..4a6a312dcf 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -29,8 +29,6 @@ use crate::prelude::Locality; use crate::publication::*; use crate::query::*; use crate::queryable::*; -#[cfg(feature = "unstable")] -use crate::sample::Attachment; use crate::sample::DataInfo; use crate::sample::DataInfoIntoSample; use crate::sample::QoS; @@ -1482,7 +1480,7 @@ impl Session { key_expr: &WireExpr, info: Option, payload: ZBuf, - #[cfg(feature = "unstable")] attachment: Option, + #[cfg(feature = "unstable")] attachment: Option, ) { let mut callbacks = SingleOrVec::default(); let state = zread!(self.state); @@ -1613,7 +1611,7 @@ impl Session { destination: Locality, timeout: Duration, value: Option, - #[cfg(feature = "unstable")] attachment: Option, + #[cfg(feature = "unstable")] attachment: Option, #[cfg(feature = "unstable")] source: SourceInfo, callback: Callback<'static, Reply>, ) -> ZResult<()> { @@ -1755,7 +1753,7 @@ impl Session { _target: TargetType, _consolidation: Consolidation, body: Option, - #[cfg(feature = "unstable")] attachment: Option, + #[cfg(feature = "unstable")] attachment: Option, ) { let (primitives, key_expr, queryables) = { let state = zread!(self.state); @@ -2240,7 +2238,7 @@ impl Primitives for Session { payload: ZBuf, info: DataInfo, #[cfg(feature = "unstable")] - attachment: Option, + attachment: Option, } let Ret { payload, diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index df9ebcca2e..975103b8c9 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -14,8 +14,8 @@ #[cfg(feature = "unstable")] #[test] fn attachment_pubsub() { + use zenoh::bytes::ZBytes; use zenoh::prelude::sync::*; - use zenoh::sample::Attachment; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh @@ -44,12 +44,12 @@ fn attachment_pubsub() { zenoh .put("test/attachment", "put") - .attachment(Attachment::from_iter(backer.iter())) + .attachment(ZBytes::from_iter(backer.iter())) .res() .unwrap(); publisher .put("publisher") - .attachment(Attachment::from_iter(backer.iter())) + .attachment(ZBytes::from_iter(backer.iter())) .res() .unwrap(); } @@ -58,7 +58,7 @@ fn attachment_pubsub() { #[cfg(feature = "unstable")] #[test] fn attachment_queries() { - use zenoh::{prelude::sync::*, sample::builder::SampleBuilderTrait, sample::Attachment}; + use zenoh::{bytes::ZBytes, prelude::sync::*, 
sample::builder::SampleBuilderTrait}; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh @@ -84,7 +84,7 @@ fn attachment_queries() { query.key_expr().clone(), query.value().unwrap().payload().clone(), ) - .attachment(Attachment::from_iter( + .attachment(ZBytes::from_iter( attachment .iter::<( [u8; std::mem::size_of::()], @@ -109,7 +109,7 @@ fn attachment_queries() { let get = zenoh .get("test/attachment") .payload("query") - .attachment(Attachment::from_iter(backer.iter())) + .attachment(ZBytes::from_iter(backer.iter())) .res() .unwrap(); while let Ok(reply) = get.recv() { From 89d6a2df6c822e1d05a9ea5e2b15d567a6418402 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 17 Apr 2024 10:49:07 +0200 Subject: [PATCH 258/357] Fix ZBytes doc --- zenoh/src/bytes.rs | 44 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/zenoh/src/bytes.rs b/zenoh/src/bytes.rs index 9e2e441f3a..f4fd882467 100644 --- a/zenoh/src/bytes.rs +++ b/zenoh/src/bytes.rs @@ -48,13 +48,13 @@ pub trait Deserialize<'a, T> { fn deserialize(self, t: &'a ZBytes) -> Result; } -/// A payload contains the serialized bytes of user data. +/// ZBytes contains the serialized bytes of user data. #[repr(transparent)] #[derive(Clone, Debug, Default, PartialEq, Eq)] pub struct ZBytes(ZBuf); impl ZBytes { - /// Create an empty payload. + /// Create an empty ZBytes. pub const fn empty() -> Self { Self(ZBuf::empty()) } @@ -67,12 +67,12 @@ impl ZBytes { Self(t.into()) } - /// Returns wether the payload is empty or not. + /// Returns wether the ZBytes is empty or not. pub fn is_empty(&self) -> bool { self.0.is_empty() } - /// Returns the length of the payload. + /// Returns the length of the ZBytes. pub fn len(&self) -> usize { self.0.len() } @@ -113,11 +113,11 @@ impl ZBytes { /// Serialize an object of type `T` as a [`Value`] using the [`ZSerde`]. /// /// ```rust - /// use zenoh::payload::ZBytes; + /// use zenoh::bytes::ZBytes; /// /// let start = String::from("abc"); - /// let payload = ZBytes::serialize(start.clone()); - /// let end: String = payload.deserialize().unwrap(); + /// let bytes = ZBytes::serialize(start.clone()); + /// let end: String = bytes.deserialize().unwrap(); /// assert_eq!(start, end); /// ``` pub fn serialize(t: T) -> Self @@ -276,7 +276,7 @@ impl From for Option { } } -/// The default serializer for Zenoh payload. It supports primitives types, such as: vec, int, uint, float, string, bool. +/// The default serializer for ZBytes. It supports primitives types, such as: Vec, int, uint, float, string, bool. /// It also supports common Rust serde values. 
#[derive(Clone, Copy, Debug)] pub struct ZSerde; @@ -921,9 +921,9 @@ impl Serialize<&serde_json::Value> for ZSerde { type Output = Result; fn serialize(self, t: &serde_json::Value) -> Self::Output { - let mut payload = ZBytes::empty(); - serde_json::to_writer(payload.writer(), t)?; - Ok(payload) + let mut bytes = ZBytes::empty(); + serde_json::to_writer(bytes.writer(), t)?; + Ok(bytes) } } @@ -980,9 +980,9 @@ impl Serialize<&serde_yaml::Value> for ZSerde { type Output = Result; fn serialize(self, t: &serde_yaml::Value) -> Self::Output { - let mut payload = ZBytes::empty(); - serde_yaml::to_writer(payload.writer(), t)?; - Ok(payload) + let mut bytes = ZBytes::empty(); + serde_yaml::to_writer(bytes.writer(), t)?; + Ok(bytes) } } @@ -1039,9 +1039,9 @@ impl Serialize<&serde_cbor::Value> for ZSerde { type Output = Result; fn serialize(self, t: &serde_cbor::Value) -> Self::Output { - let mut payload = ZBytes::empty(); - serde_cbor::to_writer(payload.0.writer(), t)?; - Ok(payload) + let mut bytes = ZBytes::empty(); + serde_cbor::to_writer(bytes.0.writer(), t)?; + Ok(bytes) } } @@ -1098,13 +1098,13 @@ impl Serialize<&serde_pickle::Value> for ZSerde { type Output = Result; fn serialize(self, t: &serde_pickle::Value) -> Self::Output { - let mut payload = ZBytes::empty(); + let mut bytes = ZBytes::empty(); serde_pickle::value_to_writer( - &mut payload.0.writer(), + &mut bytes.0.writer(), t, serde_pickle::SerOptions::default(), )?; - Ok(payload) + Ok(bytes) } } @@ -1279,9 +1279,9 @@ where { type Error = ZError; - fn deserialize(self, payload: &ZBytes) -> Result<(A, B), Self::Error> { + fn deserialize(self, bytes: &ZBytes) -> Result<(A, B), Self::Error> { let codec = Zenoh080::new(); - let mut reader = payload.0.reader(); + let mut reader = bytes.0.reader(); let abuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; let apld = ZBytes::new(abuf); From 459318430d49806c8a4016ea16337d785b2bee52 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 17 Apr 2024 11:23:45 +0200 Subject: [PATCH 259/357] Fix unstable feature selector test --- zenoh/src/selector.rs | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 15ce36faa8..2c7fc2d782 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -330,7 +330,6 @@ impl<'a> From> for Selector<'a> { fn selector_accessors() { use crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM as ANYKE; - let time_range = "[now(-2s)..now(2s)]".parse().unwrap(); for selector in [ "hello/there?_timetrick", "hello/there?_timetrick;_time", @@ -346,12 +345,25 @@ fn selector_accessors() { assert_eq!(selector.parameters().get("_timetrick").unwrap(), ""); - selector.parameters_mut().set_time_range(time_range); + let time_range = "[now(-2s)..now(2s)]"; + zcondfeat!( + "unstable", + { + let time_range = time_range.parse().unwrap(); + selector.parameters_mut().set_time_range(time_range); + assert_eq!( + selector.parameters().time_range().unwrap().unwrap(), + time_range + ); + }, + { + selector.parameters_mut().insert(TIME_RANGE_KEY, time_range); + } + ); assert_eq!( - selector.parameters().time_range().unwrap().unwrap(), + selector.parameters().get(TIME_RANGE_KEY).unwrap(), time_range ); - assert!(selector.parameters().contains_key(TIME_RANGE_KEY)); let hm: HashMap<&str, &str> = HashMap::from(selector.parameters()); assert!(hm.contains_key(TIME_RANGE_KEY)); From 5b0bdaa97fa13d75dfaf7ef603053d0e140d75c0 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 17 Apr 2024 11:24:14 +0200 
Subject: [PATCH 260/357] Fix serialization of 0 integers --- zenoh/src/bytes.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/zenoh/src/bytes.rs b/zenoh/src/bytes.rs index f4fd882467..56046f9e2a 100644 --- a/zenoh/src/bytes.rs +++ b/zenoh/src/bytes.rs @@ -696,10 +696,9 @@ macro_rules! impl_int { fn serialize(self, t: $t) -> Self::Output { let bs = t.to_le_bytes(); - let end = if t == 0 as $t { - 0 - } else { - 1 + bs.iter().rposition(|b| *b != 0).unwrap_or(bs.len() - 1) + let mut end = 1; + if t != 0 as $t { + end += bs.iter().rposition(|b| *b != 0).unwrap_or(bs.len() - 1); }; // SAFETY: // - 0 is a valid start index because bs is guaranteed to always have a length greater or equal than 1 @@ -1223,7 +1222,10 @@ macro_rules! impl_tuple { let mut buffer: ZBuf = ZBuf::empty(); let mut writer = buffer.writer(); let apld: ZBytes = a.into(); + println!("Write A: {:?}", apld.0); + let bpld: ZBytes = b.into(); + println!("Write B: {:?}", bpld.0); // SAFETY: we are serializing slices on a ZBuf, so serialization will never // fail unless we run out of memory. In that case, Rust memory allocator From 3f770295f68b61d73fe84cbf47e920587172564f Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 17 Apr 2024 12:05:00 +0200 Subject: [PATCH 261/357] Remove forgotten println --- zenoh/src/bytes.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/zenoh/src/bytes.rs b/zenoh/src/bytes.rs index 56046f9e2a..6dce95980f 100644 --- a/zenoh/src/bytes.rs +++ b/zenoh/src/bytes.rs @@ -1222,10 +1222,7 @@ macro_rules! impl_tuple { let mut buffer: ZBuf = ZBuf::empty(); let mut writer = buffer.writer(); let apld: ZBytes = a.into(); - println!("Write A: {:?}", apld.0); - let bpld: ZBytes = b.into(); - println!("Write B: {:?}", bpld.0); // SAFETY: we are serializing slices on a ZBuf, so serialization will never // fail unless we run out of memory. In that case, Rust memory allocator From d9a5ae671bf33ef6a1bda93a16f872ba3d86ed64 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 17 Apr 2024 12:18:51 +0200 Subject: [PATCH 262/357] Sort use --- zenoh/src/bytes.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/zenoh/src/bytes.rs b/zenoh/src/bytes.rs index 6dce95980f..714e1c3ff1 100644 --- a/zenoh/src/bytes.rs +++ b/zenoh/src/bytes.rs @@ -20,12 +20,11 @@ use std::{ string::FromUtf8Error, sync::Arc, }; use unwrap_infallible::UnwrapInfallible; -use zenoh_buffers::ZBufWriter; use zenoh_buffers::{ buffer::{Buffer, SplitBuffer}, reader::HasReader, writer::HasWriter, - ZBufReader, ZSlice, + ZBufReader, ZBufWriter, ZSlice, }; use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_protocol::{core::Properties, zenoh::ext::AttachmentType}; From 2daee35420a67b7bde70e491dfaf504496913d3c Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Wed, 17 Apr 2024 12:22:33 +0200 Subject: [PATCH 263/357] Sort use --- zenoh/src/bytes.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/zenoh/src/bytes.rs b/zenoh/src/bytes.rs index 714e1c3ff1..6f8ba23a65 100644 --- a/zenoh/src/bytes.rs +++ b/zenoh/src/bytes.rs @@ -14,9 +14,8 @@ //! ZBytes primitives. 
use crate::buffers::ZBuf; -use std::str::Utf8Error; use std::{ - borrow::Cow, convert::Infallible, fmt::Debug, marker::PhantomData, ops::Deref, + borrow::Cow, convert::Infallible, fmt::Debug, marker::PhantomData, ops::Deref, str::Utf8Error, string::FromUtf8Error, sync::Arc, }; use unwrap_infallible::UnwrapInfallible; From 09f9529e6b09a3384c9a1cb7bde37e94f81bc82d Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 17 Apr 2024 18:44:14 +0200 Subject: [PATCH 264/357] removed unused EncodingBuilder --- zenoh/src/api/encoding.rs | 2 -- zenoh/src/lib.rs | 1 - 2 files changed, 3 deletions(-) diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs index 3283ec1a84..7518671eed 100644 --- a/zenoh/src/api/encoding.rs +++ b/zenoh/src/api/encoding.rs @@ -848,5 +848,3 @@ impl EncodingMapping for Box { impl EncodingMapping for SharedMemoryBuf { const ENCODING: Encoding = Encoding::ZENOH_BYTES; } - -pub struct EncodingBuilder(Encoding); diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 8f7645a965..655a2699bd 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -224,7 +224,6 @@ pub mod value { /// Encoding support pub mod encoding { pub use crate::api::encoding::Encoding; - pub use crate::api::encoding::EncodingBuilder; } /// Payload primitives From 6a05bcb18e68d8cd30fd618e3b9d9793995952a2 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 17 Apr 2024 21:46:38 +0200 Subject: [PATCH 265/357] compiation fix --- examples/examples/z_sub_shm.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index 630876f287..f89df5ee60 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -20,7 +20,7 @@ use zenoh_shm::SharedMemoryBuf; #[tokio::main] async fn main() { // Initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let (mut config, key_expr) = parse_args(); From 18e60a32124303f973acc31d707bce0b323fce33 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 17 Apr 2024 21:59:27 +0200 Subject: [PATCH 266/357] compile fix --- zenoh/src/api/session.rs | 1 - zenoh/src/net/routing/interceptor/access_control.rs | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 412b4a2f6d..9ed85899c3 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -31,7 +31,6 @@ use super::{ Id, }; use crate::{ - api::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM, net::{primitives::Primitives, routing::dispatcher::face::Face, runtime::Runtime}, }; use std::{ diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index 1b0876160a..1467a9baa5 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -23,7 +23,7 @@ use super::{ InterceptorFactoryTrait, InterceptorTrait, }; use crate::net::routing::RoutingContext; -use crate::KeyExpr; +use crate::api::key_expr::KeyExpr; use std::any::Any; use std::sync::Arc; use zenoh_config::{AclConfig, Action, InterceptorFlow, Permission, Subject, ZenohId}; From f96c6146b47f9676911fdeaea57ff519fcf1eb86 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 17 Apr 2024 22:11:23 +0200 Subject: [PATCH 267/357] build fix --- io/zenoh-transport/tests/unicast_time.rs | 16 ++++++++-------- zenoh/src/api/session.rs | 5 ++--- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/io/zenoh-transport/tests/unicast_time.rs 
b/io/zenoh-transport/tests/unicast_time.rs index baebb5a95f..088db05049 100644 --- a/io/zenoh-transport/tests/unicast_time.rs +++ b/io/zenoh-transport/tests/unicast_time.rs @@ -230,7 +230,7 @@ async fn time_lowlatency_transport(endpoint: &EndPoint) { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_tcp_only() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13000).parse().unwrap(); time_universal_transport(&endpoint).await; } @@ -239,7 +239,7 @@ async fn time_tcp_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_tcp_only_with_lowlatency_transport() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13100).parse().unwrap(); time_lowlatency_transport(&endpoint).await; } @@ -248,7 +248,7 @@ async fn time_tcp_only_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_udp_only() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13010).parse().unwrap(); time_universal_transport(&endpoint).await; } @@ -257,7 +257,7 @@ async fn time_udp_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_udp_only_with_lowlatency_transport() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13110).parse().unwrap(); time_lowlatency_transport(&endpoint).await; } @@ -266,7 +266,7 @@ async fn time_udp_only_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_ws_only() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13020).parse().unwrap(); time_universal_transport(&endpoint).await; } @@ -275,7 +275,7 @@ async fn time_ws_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_ws_only_with_lowlatency_transport() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13120).parse().unwrap(); time_lowlatency_transport(&endpoint).await; } @@ -304,7 +304,7 @@ async fn time_unixpipe_only_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_unix_only() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let f1 = "zenoh-test-unix-socket-9.sock"; let _ = std::fs::remove_file(f1); let endpoint: EndPoint = format!("unixsock-stream/{f1}").parse().unwrap(); @@ -318,7 +318,7 @@ async fn time_unix_only() { #[ignore] async fn time_tls_only() { use zenoh_link::tls::config::*; - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); // NOTE: this an auto-generated pair of certificate and key. // The target domain is localhost, so it has no real // mapping to any existing domain. 
The certificate and key diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 9ed85899c3..56a1288f40 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -22,6 +22,7 @@ use super::{ key_expr::{KeyExpr, KeyExprInner}, payload::Payload, publication::Priority, + query::_REPLY_KEY_EXPR_ANY_SEL_PARAM, query::{ConsolidationMode, GetBuilder, QueryConsolidation, QueryState, QueryTarget, Reply}, queryable::{Query, QueryInner, QueryableBuilder, QueryableState}, sample::{DataInfo, DataInfoIntoSample, Locality, QoS, Sample, SampleKind}, @@ -30,9 +31,7 @@ use super::{ value::Value, Id, }; -use crate::{ - net::{primitives::Primitives, routing::dispatcher::face::Face, runtime::Runtime}, -}; +use crate::net::{primitives::Primitives, routing::dispatcher::face::Face, runtime::Runtime}; use std::{ collections::HashMap, convert::{TryFrom, TryInto}, From fc835434d212750fac6d08771a81a830eecb6a52 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 17 Apr 2024 22:16:55 +0200 Subject: [PATCH 268/357] cargo fmt --- zenoh/src/net/routing/interceptor/access_control.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index 1467a9baa5..b23db9765e 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -22,8 +22,8 @@ use super::{ authorization::PolicyEnforcer, EgressInterceptor, IngressInterceptor, InterceptorFactory, InterceptorFactoryTrait, InterceptorTrait, }; -use crate::net::routing::RoutingContext; use crate::api::key_expr::KeyExpr; +use crate::net::routing::RoutingContext; use std::any::Any; use std::sync::Arc; use zenoh_config::{AclConfig, Action, InterceptorFlow, Permission, Subject, ZenohId}; From a12d2c80290fbd649129c3c3768eb7a84cc1c64e Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Thu, 18 Apr 2024 12:10:35 +0200 Subject: [PATCH 269/357] Fix build --- examples/examples/z_sub_shm.rs | 2 +- io/zenoh-transport/tests/unicast_time.rs | 22 +++++++++++----------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index f89df5ee60..d304d6a7f6 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -20,7 +20,7 @@ use zenoh_shm::SharedMemoryBuf; #[tokio::main] async fn main() { // Initiate logging - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let (mut config, key_expr) = parse_args(); diff --git a/io/zenoh-transport/tests/unicast_time.rs b/io/zenoh-transport/tests/unicast_time.rs index baebb5a95f..5b9209ada3 100644 --- a/io/zenoh-transport/tests/unicast_time.rs +++ b/io/zenoh-transport/tests/unicast_time.rs @@ -230,7 +230,7 @@ async fn time_lowlatency_transport(endpoint: &EndPoint) { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_tcp_only() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13000).parse().unwrap(); time_universal_transport(&endpoint).await; } @@ -239,7 +239,7 @@ async fn time_tcp_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_tcp_only_with_lowlatency_transport() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13100).parse().unwrap(); time_lowlatency_transport(&endpoint).await; } @@ -248,7 +248,7 @@ 
async fn time_tcp_only_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_udp_only() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13010).parse().unwrap(); time_universal_transport(&endpoint).await; } @@ -257,7 +257,7 @@ async fn time_udp_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_udp_only_with_lowlatency_transport() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13110).parse().unwrap(); time_lowlatency_transport(&endpoint).await; } @@ -266,7 +266,7 @@ async fn time_udp_only_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_ws_only() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13020).parse().unwrap(); time_universal_transport(&endpoint).await; } @@ -275,7 +275,7 @@ async fn time_ws_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_ws_only_with_lowlatency_transport() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13120).parse().unwrap(); time_lowlatency_transport(&endpoint).await; } @@ -284,7 +284,7 @@ async fn time_ws_only_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_unixpipe_only() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = "unixpipe/time_unixpipe_only".parse().unwrap(); time_universal_transport(&endpoint).await; } @@ -293,7 +293,7 @@ async fn time_unixpipe_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_unixpipe_only_with_lowlatency_transport() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = "unixpipe/time_unixpipe_only_with_lowlatency_transport" .parse() .unwrap(); @@ -304,7 +304,7 @@ async fn time_unixpipe_only_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_unix_only() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let f1 = "zenoh-test-unix-socket-9.sock"; let _ = std::fs::remove_file(f1); let endpoint: EndPoint = format!("unixsock-stream/{f1}").parse().unwrap(); @@ -318,7 +318,7 @@ async fn time_unix_only() { #[ignore] async fn time_tls_only() { use zenoh_link::tls::config::*; - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); // NOTE: this an auto-generated pair of certificate and key. // The target domain is localhost, so it has no real // mapping to any existing domain. 
The certificate and key @@ -514,7 +514,7 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn time_vsock_only() { - zenoh_util::init_log_from_env(); + zenoh_util::try_init_log_from_env(); let endpoint: EndPoint = "vsock/VMADDR_CID_LOCAL:17000".parse().unwrap(); time_lowlatency_transport(&endpoint).await; } From b6cc94577fdff9e3c8d8c51eafc777928cdb77bd Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 18 Apr 2024 12:19:59 +0200 Subject: [PATCH 270/357] build fix unifished --- Cargo.lock | 2 -- plugins/zenoh-plugin-rest/Cargo.toml | 1 - plugins/zenoh-plugin-rest/src/lib.rs | 7 +++--- .../zenoh-plugin-storage-manager/Cargo.toml | 1 - .../zenoh-plugin-storage-manager/src/lib.rs | 5 +++-- zenoh-ext/src/group.rs | 1 + zenoh/src/api/admin.rs | 2 +- zenoh/src/api/builders/publication.rs | 22 +++++++++---------- zenoh/src/api/builders/sample.rs | 6 ++--- zenoh/src/api/{payload.rs => bytes.rs} | 3 --- zenoh/src/api/mod.rs | 2 +- zenoh/src/api/publication.rs | 8 +++---- zenoh/src/api/query.rs | 8 ++----- zenoh/src/api/queryable.rs | 12 +++++----- zenoh/src/api/sample.rs | 8 +------ zenoh/src/api/session.rs | 10 +++++---- zenoh/src/lib.rs | 17 +++++++------- zenoh/src/net/runtime/adminspace.rs | 2 +- zenoh/src/prelude.rs | 4 ++-- 19 files changed, 52 insertions(+), 69 deletions(-) rename zenoh/src/api/{payload.rs => bytes.rs} (99%) diff --git a/Cargo.lock b/Cargo.lock index c2bbd6a6db..7653db324a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5447,7 +5447,6 @@ dependencies = [ "zenoh", "zenoh-plugin-trait", "zenoh-result", - "zenoh-util", ] [[package]] @@ -5473,7 +5472,6 @@ dependencies = [ "urlencoding", "zenoh", "zenoh-plugin-trait", - "zenoh-util", "zenoh_backend_traits", ] diff --git a/plugins/zenoh-plugin-rest/Cargo.toml b/plugins/zenoh-plugin-rest/Cargo.toml index 19fa9eafdc..05f010bdb8 100644 --- a/plugins/zenoh-plugin-rest/Cargo.toml +++ b/plugins/zenoh-plugin-rest/Cargo.toml @@ -36,7 +36,6 @@ anyhow = { workspace = true, features = ["default"] } async-std = { workspace = true, features = ["default", "attributes"] } base64 = { workspace = true } const_format = { workspace = true } -zenoh-util = {workspace = true } flume = { workspace = true } futures = { workspace = true } git-version = { workspace = true } diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index fb5aa96a99..7b51f2054b 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -29,11 +29,10 @@ use std::sync::Arc; use tide::http::Mime; use tide::sse::Sender; use tide::{Request, Response, Server, StatusCode}; -use zenoh::core::AsyncResolve; +use zenoh::bytes::{StringOrBase64, ZBytes}; +use zenoh::core::{try_init_log_from_env, AsyncResolve}; use zenoh::encoding::Encoding; use zenoh::key_expr::{keyexpr, KeyExpr}; -use zenoh::payload::{Payload, StringOrBase64}; -use zenoh::bytes::StringOrBase64; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; @@ -245,7 +244,7 @@ impl Plugin for RestPlugin { // Try to initiate login. // Required in case of dynamic lib, otherwise no logs. // But cannot be done twice in case of static link. 
- zenoh_util::try_init_log_from_env(); + try_init_log_from_env(); tracing::debug!("REST plugin {}", LONG_VERSION.as_str()); let runtime_conf = runtime.config().lock(); diff --git a/plugins/zenoh-plugin-storage-manager/Cargo.toml b/plugins/zenoh-plugin-storage-manager/Cargo.toml index 5d479d04c6..9486ab5367 100644 --- a/plugins/zenoh-plugin-storage-manager/Cargo.toml +++ b/plugins/zenoh-plugin-storage-manager/Cargo.toml @@ -37,7 +37,6 @@ async-trait = { workspace = true } crc = { workspace = true } const_format = { workspace = true } derive-new = { workspace = true } -zenoh-util = { workspace = true } flume = { workspace = true } futures = { workspace = true } git-version = { workspace = true } diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index ac3f009ab9..a87a6194cb 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -22,6 +22,7 @@ use async_std::task; use flume::Sender; use memory_backend::MemoryBackend; +use zenoh::core::try_init_log_from_env; use std::collections::HashMap; use std::convert::TryFrom; use std::sync::Arc; @@ -68,7 +69,7 @@ impl Plugin for StoragesPlugin { type Instance = zenoh::plugins::RunningPlugin; fn start(name: &str, runtime: &Self::StartArgs) -> ZResult { - zenoh_util::try_init_log_from_env(); + try_init_log_from_env(); tracing::debug!("StorageManager plugin {}", Self::PLUGIN_VERSION); let config = { PluginConfig::try_from((name, runtime.config().lock().plugin(name).unwrap())) }?; @@ -101,7 +102,7 @@ impl StorageRuntimeInner { // Try to initiate login. // Required in case of dynamic lib, otherwise no logs. // But cannot be done twice in case of static link. - zenoh_util::try_init_log_from_env(); + try_init_log_from_env(); let PluginConfig { name, backend_search_dirs, diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 6d4f688327..7528dcbdb9 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -25,6 +25,7 @@ use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::sync::Mutex; use zenoh::prelude::r#async::*; +use zenoh::internal::{bail, Condition, TaskController}; const GROUP_PREFIX: &str = "zenoh/ext/net/group"; const EVENT_POSTFIX: &str = "evt"; diff --git a/zenoh/src/api/admin.rs b/zenoh/src/api/admin.rs index 7055eb63da..c221d7f27c 100644 --- a/zenoh/src/api/admin.rs +++ b/zenoh/src/api/admin.rs @@ -12,9 +12,9 @@ // ZettaScale Zenoh Team, // use super::{ + bytes::ZBytes, encoding::Encoding, key_expr::KeyExpr, - payload::Payload, queryable::Query, sample::Locality, sample::{DataInfo, SampleKind}, diff --git a/zenoh/src/api/builders/publication.rs b/zenoh/src/api/builders/publication.rs index c710d0ad79..639ae4ed37 100644 --- a/zenoh/src/api/builders/publication.rs +++ b/zenoh/src/api/builders/publication.rs @@ -15,19 +15,19 @@ use std::future::Ready; // use crate::api::builders::sample::SampleBuilderTrait; use crate::api::builders::sample::{QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait}; -use crate::api::key_expr::KeyExpr; #[cfg(feature = "unstable")] -use crate::api::payload::OptionPayload; +use crate::api::bytes::OptionZBytes; +use crate::api::bytes::ZBytes; +use crate::api::key_expr::KeyExpr; use crate::api::publication::Priority; #[cfg(feature = "unstable")] -use crate::api::sample::Attachment; use crate::api::sample::Locality; use crate::api::sample::SampleKind; #[cfg(feature = "unstable")] use crate::api::sample::SourceInfo; use crate::api::session::SessionRef; use 
crate::api::value::Value; -use crate::api::{encoding::Encoding, payload::Payload, publication::Publisher}; +use crate::api::{encoding::Encoding, publication::Publisher}; use zenoh_core::{AsyncResolve, Resolvable, Result as ZResult, SyncResolve}; use zenoh_protocol::core::CongestionControl; use zenoh_protocol::network::Mapping; @@ -45,7 +45,7 @@ pub type PublisherDeleteBuilder<'a> = #[derive(Debug, Clone)] pub struct PublicationBuilderPut { - pub(crate) payload: Payload, + pub(crate) payload: ZBytes, pub(crate) encoding: Encoding, } #[derive(Debug, Clone)] @@ -79,7 +79,7 @@ pub struct PublicationBuilder { #[cfg(feature = "unstable")] pub(crate) source_info: SourceInfo, #[cfg(feature = "unstable")] - pub(crate) attachment: Option, + pub(crate) attachment: Option, } impl QoSBuilderTrait for PublicationBuilder, T> { @@ -130,7 +130,7 @@ impl

ValueBuilderTrait for PublicationBuilder { fn payload(self, payload: IntoPayload) -> Self where - IntoPayload: Into, + IntoPayload: Into, { Self { kind: PublicationBuilderPut { @@ -158,8 +158,8 @@ impl SampleBuilderTrait for PublicationBuilder { } } #[cfg(feature = "unstable")] - fn attachment>(self, attachment: TA) -> Self { - let attachment: OptionPayload = attachment.into(); + fn attachment>(self, attachment: TA) -> Self { + let attachment: OptionZBytes = attachment.into(); Self { attachment: attachment.into(), ..self @@ -202,7 +202,7 @@ impl SyncResolve for PublicationBuilder, PublicationBui fn res_sync(self) -> ::To { let publisher = self.publisher.create_one_shot_publisher()?; publisher.resolve_put( - Payload::empty(), + ZBytes::empty(), SampleKind::Delete, Encoding::ZENOH_BYTES, self.timestamp, @@ -407,7 +407,7 @@ impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { fn res_sync(self) -> ::To { self.publisher.resolve_put( - Payload::empty(), + ZBytes::empty(), SampleKind::Delete, Encoding::ZENOH_BYTES, self.timestamp, diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs index 420a150509..ccf10d9574 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -11,10 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::marker::PhantomData; +use crate::api::bytes::ZBytes; use crate::api::encoding::Encoding; use crate::api::key_expr::KeyExpr; -use crate::api::payload::Payload; use crate::api::publication::Priority; use crate::api::sample::QoS; use crate::api::sample::QoSBuilder; @@ -22,7 +21,8 @@ use crate::api::sample::Sample; use crate::api::sample::SampleKind; use crate::api::value::Value; #[cfg(feature = "unstable")] -use crate::{api::payload::OptionPayload, sample::SourceInfo}; +use crate::{api::bytes::OptionZBytes, sample::SourceInfo}; +use std::marker::PhantomData; use uhlc::Timestamp; use zenoh_core::zresult; use zenoh_protocol::core::CongestionControl; diff --git a/zenoh/src/api/payload.rs b/zenoh/src/api/bytes.rs similarity index 99% rename from zenoh/src/api/payload.rs rename to zenoh/src/api/bytes.rs index d9e6c1afdd..6f8ba23a65 100644 --- a/zenoh/src/api/payload.rs +++ b/zenoh/src/api/bytes.rs @@ -12,10 +12,7 @@ // ZettaScale Zenoh Team, // -<<<<<<<< HEAD:zenoh/src/api/payload.rs -======== //! ZBytes primitives. 
->>>>>>>> protocol_changes:zenoh/src/bytes.rs use crate::buffers::ZBuf; use std::{ borrow::Cow, convert::Infallible, fmt::Debug, marker::PhantomData, ops::Deref, str::Utf8Error, diff --git a/zenoh/src/api/mod.rs b/zenoh/src/api/mod.rs index ab38844ea6..c2cc3504f0 100644 --- a/zenoh/src/api/mod.rs +++ b/zenoh/src/api/mod.rs @@ -16,13 +16,13 @@ pub(crate) type Id = u32; pub(crate) mod admin; pub(crate) mod builders; +pub(crate) mod bytes; pub(crate) mod encoding; pub(crate) mod handlers; pub(crate) mod info; pub(crate) mod key_expr; #[cfg(feature = "unstable")] pub(crate) mod liveliness; -pub(crate) mod payload; pub(crate) mod plugins; pub(crate) mod publication; pub(crate) mod query; diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index b8c39a3fb5..22375decd3 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -17,9 +17,9 @@ use super::{ PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, PublisherDeleteBuilder, PublisherPutBuilder, }, + bytes::ZBytes, encoding::Encoding, key_expr::KeyExpr, - payload::Payload, sample::{DataInfo, Locality, QoS, Sample, SampleFields, SampleKind}, session::{SessionRef, Undeclarable}, }; @@ -43,7 +43,7 @@ use zenoh_result::{Error, ZResult}; #[zenoh_macros::unstable] use { crate::api::handlers::{Callback, DefaultHandler, IntoHandler}, - crate::api::sample::{Attachment, SourceInfo}, + crate::api::sample::SourceInfo, crate::api::Id, zenoh_protocol::core::EntityGlobalId, zenoh_protocol::core::EntityId, @@ -533,12 +533,12 @@ impl<'a> Sink for Publisher<'a> { impl Publisher<'_> { pub(crate) fn resolve_put( &self, - payload: Payload, + payload: ZBytes, kind: SampleKind, encoding: Encoding, timestamp: Option, #[cfg(feature = "unstable")] source_info: SourceInfo, - #[cfg(feature = "unstable")] attachment: Option, + #[cfg(feature = "unstable")] attachment: Option, ) -> ZResult<()> { tracing::trace!("write({:?}, [...])", &self.key_expr); let primitives = zread!(self.session.state) diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index dd5d269fa4..164be63b8c 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -14,10 +14,10 @@ use super::{ builders::sample::{QoSBuilderTrait, ValueBuilderTrait}, + bytes::ZBytes, encoding::Encoding, handlers::{locked, Callback, DefaultHandler, IntoHandler}, key_expr::KeyExpr, - payload::Payload, publication::Priority, sample::{Locality, QoSBuilder, Sample}, selector::Selector, @@ -31,11 +31,7 @@ use zenoh_protocol::core::{CongestionControl, ZenohId}; use zenoh_result::ZResult; #[zenoh_macros::unstable] -use super::{ - builders::sample::SampleBuilderTrait, - payload::OptionPayload, - sample::{Attachment, SourceInfo}, -}; +use super::{builders::sample::SampleBuilderTrait, bytes::OptionZBytes, sample::SourceInfo}; /// The [`Queryable`](crate::queryable::Queryable)s that should be target of a [`get`](Session::get). 
pub type QueryTarget = zenoh_protocol::network::request::ext::TargetType; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index 3c013fc8d7..2d84530f6b 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -13,10 +13,10 @@ // use super::{ builders::sample::{QoSBuilderTrait, SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}, + bytes::ZBytes, encoding::Encoding, handlers::{locked, DefaultHandler, IntoHandler}, key_expr::KeyExpr, - payload::Payload, publication::Priority, sample::{Locality, QoSBuilder, Sample, SampleKind}, selector::{Parameters, Selector}, @@ -43,10 +43,8 @@ use zenoh_result::ZResult; #[zenoh_macros::unstable] use { super::{ - builders::sample::SampleBuilderTrait, - payload::OptionPayload, - query::ReplyKeyExpr, - sample::{Attachment, SourceInfo}, + builders::sample::SampleBuilderTrait, bytes::OptionZBytes, query::ReplyKeyExpr, + sample::SourceInfo, }, zenoh_protocol::core::EntityGlobalId, }; @@ -283,8 +281,8 @@ impl AsyncResolve for ReplySample<'_> { #[derive(Debug)] pub struct ReplyBuilderPut { - payload: super::ZBytes, - encoding: super::Encoding, + payload: ZBytes, + encoding: Encoding, } #[derive(Debug)] pub struct ReplyBuilderDelete; diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 3aa03c4392..b9f77a7157 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -14,9 +14,8 @@ //! Sample primitives use super::{ - builders::sample::QoSBuilderTrait, encoding::Encoding, key_expr::KeyExpr, payload::Payload, + builders::sample::QoSBuilderTrait, bytes::ZBytes, encoding::Encoding, key_expr::KeyExpr, publication::Priority, value::Value, -bytes::ZBytes }; #[cfg(feature = "unstable")] use serde::Serialize; @@ -26,11 +25,6 @@ use zenoh_protocol::{ network::declare::ext::QoSType, }; -#[zenoh_macros::unstable] -pub use attachment::Attachment; -#[zenoh_macros::unstable] -use serde::Serialize; - pub type SourceSn = u64; /// The locality of samples to be received by subscribers or targeted by publishers. diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 01bd3cf9ad..7d6a0c2b66 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -16,14 +16,16 @@ use super::{ builders::publication::{ PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, PublisherBuilder, }, + bytes::ZBytes, encoding::Encoding, handlers::{Callback, DefaultHandler}, info::SessionInfo, key_expr::{KeyExpr, KeyExprInner}, - payload::Payload, publication::Priority, - query::_REPLY_KEY_EXPR_ANY_SEL_PARAM, - query::{ConsolidationMode, GetBuilder, QueryConsolidation, QueryState, QueryTarget, Reply}, + query::{ + ConsolidationMode, GetBuilder, QueryConsolidation, QueryState, QueryTarget, Reply, + _REPLY_KEY_EXPR_ANY_SEL_PARAM, + }, queryable::{Query, QueryInner, QueryableBuilder, QueryableState}, sample::{DataInfo, DataInfoIntoSample, Locality, QoS, Sample, SampleKind}, selector::{Selector, TIME_RANGE_KEY}, @@ -83,7 +85,7 @@ use super::{ liveliness::{Liveliness, LivelinessTokenState}, publication::Publisher, publication::{MatchingListenerState, MatchingStatus}, - sample::{Attachment, SourceInfo}, + sample::SourceInfo, }; zconfigurable! { diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 655a2699bd..054b256ccd 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -122,6 +122,7 @@ pub mod core { /// A zenoh result. 
pub use zenoh_result::ZResult as Result; pub use zenoh_util::core::zresult::ErrNo; + pub use zenoh_util::try_init_log_from_env; } /// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate @@ -207,8 +208,6 @@ pub mod sample { pub use crate::api::builders::sample::TimestampBuilderTrait; pub use crate::api::builders::sample::ValueBuilderTrait; #[zenoh_macros::unstable] - pub use crate::api::sample::Attachment; - #[zenoh_macros::unstable] pub use crate::api::sample::Locality; pub use crate::api::sample::Sample; pub use crate::api::sample::SampleKind; @@ -227,13 +226,13 @@ pub mod encoding { } /// Payload primitives -pub mod payload { - pub use crate::api::payload::Deserialize; - pub use crate::api::payload::Payload; - pub use crate::api::payload::PayloadReader; - pub use crate::api::payload::Serialize; - pub use crate::api::payload::StringOrBase64; - pub use crate::api::payload::ZSerde; +pub mod bytes { + pub use crate::api::bytes::Deserialize; + pub use crate::api::bytes::ZBytes; + pub use crate::api::bytes::ZBytesReader; + pub use crate::api::bytes::Serialize; + pub use crate::api::bytes::StringOrBase64; + pub use crate::api::bytes::ZSerde; } /// [Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 630e58f2ca..48665866fe 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -20,7 +20,7 @@ use crate::api::queryable::QueryInner; use crate::api::value::Value; use crate::encoding::Encoding; use crate::net::primitives::Primitives; -use crate::payload::Payload; +use crate::api::bytes::ZBytes; use serde_json::json; use std::collections::HashMap; use std::convert::TryFrom; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index e89542122d..1f9ccb057c 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -43,7 +43,7 @@ pub(crate) mod flat { pub use crate::encoding::*; pub use crate::handlers::*; pub use crate::key_expr::*; - pub use crate::payload::*; + pub use crate::bytes::*; pub use crate::publication::*; pub use crate::query::*; pub use crate::queryable::*; @@ -66,7 +66,7 @@ pub(crate) mod mods { pub use crate::encoding; pub use crate::handlers; pub use crate::key_expr; - pub use crate::payload; + pub use crate::bytes; pub use crate::publication; pub use crate::query; pub use crate::queryable; From 820e2b51606349d73b3e260469a4ce9af3e68862 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 18 Apr 2024 12:21:32 +0200 Subject: [PATCH 271/357] build fix --- plugins/zenoh-plugin-rest/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 7b51f2054b..7fe591e3f7 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -494,7 +494,7 @@ pub async fn run(runtime: Runtime, conf: Config) -> ZResult<()> { // Try to initiate login. // Required in case of dynamic lib, otherwise no logs. // But cannot be done twice in case of static link. 
- zenoh_util::try_init_log_from_env(); + try_init_log_from_env(); let zid = runtime.zid().to_string(); let session = zenoh::session::init(runtime).res().await.unwrap(); From dd37f4545f0d035ea6e7bc0112d25fa03858f5fa Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 18 Apr 2024 12:22:35 +0200 Subject: [PATCH 272/357] cargo fmt --- plugins/zenoh-plugin-storage-manager/src/lib.rs | 2 +- zenoh-ext/src/group.rs | 2 +- zenoh/src/lib.rs | 4 ++-- zenoh/src/net/runtime/adminspace.rs | 2 +- zenoh/src/prelude.rs | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index a87a6194cb..77617a487b 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -22,12 +22,12 @@ use async_std::task; use flume::Sender; use memory_backend::MemoryBackend; -use zenoh::core::try_init_log_from_env; use std::collections::HashMap; use std::convert::TryFrom; use std::sync::Arc; use std::sync::Mutex; use storages_mgt::StorageMessage; +use zenoh::core::try_init_log_from_env; use zenoh::core::Result as ZResult; use zenoh::core::SyncResolve; use zenoh::internal::zlock; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 7528dcbdb9..1bf37f365c 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -24,8 +24,8 @@ use std::ops::Add; use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::sync::Mutex; -use zenoh::prelude::r#async::*; use zenoh::internal::{bail, Condition, TaskController}; +use zenoh::prelude::r#async::*; const GROUP_PREFIX: &str = "zenoh/ext/net/group"; const EVENT_POSTFIX: &str = "evt"; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 054b256ccd..c3418ecb4c 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -228,10 +228,10 @@ pub mod encoding { /// Payload primitives pub mod bytes { pub use crate::api::bytes::Deserialize; - pub use crate::api::bytes::ZBytes; - pub use crate::api::bytes::ZBytesReader; pub use crate::api::bytes::Serialize; pub use crate::api::bytes::StringOrBase64; + pub use crate::api::bytes::ZBytes; + pub use crate::api::bytes::ZBytesReader; pub use crate::api::bytes::ZSerde; } diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 48665866fe..d5e2ca1628 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -13,6 +13,7 @@ use super::routing::dispatcher::face::Face; use super::Runtime; use crate::api::builders::sample::ValueBuilderTrait; +use crate::api::bytes::ZBytes; use crate::api::key_expr::KeyExpr; use crate::api::plugins; use crate::api::queryable::Query; @@ -20,7 +21,6 @@ use crate::api::queryable::QueryInner; use crate::api::value::Value; use crate::encoding::Encoding; use crate::net::primitives::Primitives; -use crate::api::bytes::ZBytes; use serde_json::json; use std::collections::HashMap; use std::convert::TryFrom; diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 1f9ccb057c..17286ddeea 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -38,12 +38,12 @@ // Reexport API in flat namespace pub(crate) mod flat { pub use crate::buffers::*; + pub use crate::bytes::*; pub use crate::config::*; pub use crate::core::{Error as ZError, Resolvable, Resolve, Result as ZResult}; pub use crate::encoding::*; pub use crate::handlers::*; pub use crate::key_expr::*; - pub use crate::bytes::*; pub use crate::publication::*; pub use crate::query::*; pub use crate::queryable::*; 
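The commits above rename zenoh's payload module to bytes: Payload becomes ZBytes, PayloadReader becomes ZBytesReader, OptionPayload becomes OptionZBytes, and the public re-export moves from zenoh::payload to zenoh::bytes (the prelude re-exports are reordered accordingly in the surrounding hunks). A minimal migration sketch for downstream code, assuming the conversions formerly provided by payload.rs (for example From<Vec<u8>>) carry over unchanged under the new names:

// Before these patches:
//   use zenoh::payload::Payload;
//   let empty = Payload::empty();
//   let bytes: Payload = vec![1u8, 2, 3].into();

// After these patches: same functionality, new module and type names.
use zenoh::bytes::ZBytes;

fn build_payloads() -> (ZBytes, ZBytes) {
    // ZBytes::empty() replaces Payload::empty() one-for-one.
    let empty = ZBytes::empty();
    // Assumed: the From<Vec<u8>> impl from the former payload.rs is preserved.
    let bytes: ZBytes = vec![1u8, 2, 3].into();
    (empty, bytes)
}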
@@ -61,12 +61,12 @@ pub(crate) mod flat { // Reexport API in hierarchical namespace pub(crate) mod mods { pub use crate::buffers; + pub use crate::bytes; pub use crate::config; pub use crate::core; pub use crate::encoding; pub use crate::handlers; pub use crate::key_expr; - pub use crate::bytes; pub use crate::publication; pub use crate::query; pub use crate::queryable; From f527cecd722c5b3590688cec55f84d0d71b95fc8 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 18 Apr 2024 12:28:53 +0200 Subject: [PATCH 273/357] clippy fix --- plugins/zenoh-plugin-rest/examples/z_serve_sse.rs | 4 ++-- plugins/zenoh-plugin-storage-manager/tests/operations.rs | 1 + plugins/zenoh-plugin-storage-manager/tests/wildcard.rs | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index 23d692a554..5e5485d0d2 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -14,7 +14,7 @@ use clap::{arg, Command}; use std::time::Duration; use zenoh::config::Config; -use zenoh::core::AsyncResolve; +use zenoh::core::{try_init_log_from_env, AsyncResolve}; use zenoh::key_expr::keyexpr; use zenoh::publication::CongestionControl; use zenoh::sample::QoSBuilderTrait; @@ -36,7 +36,7 @@ if(typeof(EventSource) !== "undefined") { #[async_std::main] async fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + try_init_log_from_env(); let config = parse_args(); let key = keyexpr::new("demo/sse").unwrap(); diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index 6e579539dd..0678431b7e 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -20,6 +20,7 @@ use std::str::FromStr; use std::thread::sleep; use async_std::task; +use zenoh::internal::zasync_executor_init; use zenoh::prelude::r#async::*; use zenoh_plugin_trait::Plugin; diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index 0e29e0b531..72fa62f3ca 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -21,6 +21,7 @@ use std::thread::sleep; // use std::collections::HashMap; use async_std::task; +use zenoh::internal::zasync_executor_init; use zenoh::prelude::r#async::*; use zenoh_plugin_trait::Plugin; From 43d280f5709886aa83fd4ada9c847ed53a264a4e Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Thu, 18 Apr 2024 15:29:09 +0200 Subject: [PATCH 274/357] Fix valgrind-check build --- ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs index 3a5a11a98f..676db8f7d0 100644 --- a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -31,10 +31,11 @@ async fn main() { .declare_queryable(&queryable_key_expr.clone()) .callback(move |query| { println!(">> Handling query '{}'", query.selector()); + let queryable_key_expr = queryable_key_expr.clone(); zenoh_runtime::ZRuntime::Application.block_in_place(async move { query .reply( - queryable_key_expr.clone(), + queryable_key_expr, query.value().unwrap().payload().clone(), ) 
.res() From 26cb7e925d69dc28b5d0407ee315b999a4f32ec2 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 18 Apr 2024 16:25:22 +0200 Subject: [PATCH 275/357] locality under unstable --- zenoh/src/api/builders/publication.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/zenoh/src/api/builders/publication.rs b/zenoh/src/api/builders/publication.rs index 639ae4ed37..ef2224193f 100644 --- a/zenoh/src/api/builders/publication.rs +++ b/zenoh/src/api/builders/publication.rs @@ -255,6 +255,7 @@ pub struct PublisherBuilder<'a, 'b: 'a> { pub(crate) congestion_control: CongestionControl, pub(crate) priority: Priority, pub(crate) is_express: bool, + #[cfg(feature = "unstable")] pub(crate) destination: Locality, } From 3711d4568bcc4a54ac0009a6573fe3cd6c5da3ae Mon Sep 17 00:00:00 2001 From: Dmitrii Bannov <104833606+yellowhatter@users.noreply.github.com> Date: Fri, 19 Apr 2024 15:09:29 +0300 Subject: [PATCH 276/357] SHM subsystem: Rust (#823) * add watchdog codebase * integrated and tested POC * Update shm.rs * WIP: implemented buffer headers in separate SHM segment, made buffer generation support * - refactored POSIX shm wrapper - generation works * - use posix_shm module for data segment in SharedMemoryManager - use numeric ID instead of string ID for SHM buffer identification - this feature speeds up segment lookups and reduces wire overhead - remove unnecessary fields from SharedMemoryManager - fix clippy warnings - added comments * WIP on SHM * Fix clippy, additional improvements * Implement watchdog periodic tasks with optional realtime scheduling * [skip ci] WIP on SHM provider\client API * [skip ci] WIP on SHM API * [skip ci] Big WIP on SHM API integration * [skip ci] working SHM (tests passing) * [skip ci] WIP on API * [skip ci] WIP on SHM API * WIP on API * - added SharedMemoryReader to Session and Runtime building API - support ProtocolID exchange in establish - convert buffer based on supported protocol ids * [skip ci] correct shm establish logic * Remove SharedMemoryFactory * [skip ci] - WIP to brush-up the API, eliminate some API flaws - SHM provider is now conceptually made thread-safe for better flexibility * [skip ci] Final updates to SHM provider API * ZSlice: safe mutable acces to inner contents and support for copy-on-write and it's elision as an optimization for SHM buffers * [skip ci] - added ZSliceMut and it's functionality - documented public SHM APIs - brush-up for SHM API - hide SharedMemoryBuf from API and return ZSlice instead - add is_valid() for ZSlice that checks generation counter on SHM * [skip ci] - ZSliceMut API changed - ZSliceBuffer: different API for shared-memory feature - Hide unnecessary APIs from pub in SharedMemoryReader - Fix default alignment calculation for AllocAlignment - Expose necessary SHM API in zenoh crate - Brush-up examples - Fix shmbuf to rawbuf conversion * [skip ci] - ignore some tests because they need to be run globally sequentially - transit shared-memory feature to zenoh-buffers in zenoh-transport * Solved additive feature problem * - remove dependency in zenoh-buffers - make periodic_task compile on win * fix tests * - refine buld system to optimize workspace dependencies - fix posix shm segment size estimation * Update shm.rs * - replace async-std with tokio in zenoh-shm * fix examples * ooops * ignore test with too long SHM segment id for MacOS * lower test complexity to adopt runner capabilities * ignore test with too long SHM segment id for MacOS * - use md5 hashes to shorten SHM segments OS ids (need for MacOS) - enable 128 bit 
ids in tests * use crc instead of md5 * - get rid of allocator-api2 as work for Allocator is postponed for a while - allow shared-memory feature for macOS and Win in CI (without unixpipe transport) - move deps into workspace - add documentation * move from 'const ID: ProtocolID' to trait interface to support both static (Rust API) and dynamic (other languages API) protocol ID setting * support compile-time and runtime ProtocolID setting * Add more tests to dedicated execution * [skip ci] add more *concurrent tests to dedicated execution * - more SHM API docs - document all SHM API as "unstable" - hide all SHM API behind "unstable" feature - some API brush-up - improve CI for SHM * exclude test_default_features for SHM tests * Move test_helpers.rs into tests to follow the guideline of integration tests. * update doc * Eliminate zenoh-buffers -> zenoh-shm dependency to illustarte the problem * fix: Add disabled by default `shared-memory` feature in zenoh-shm * [skip ci] client storage interface fix * [skip ci] fix map method interface for SharedMemoryProvider * brush-up some things after merge * PR review fixes * [skip ci] add shm feature to zenoh-ext * [skip ci] oops * [skip ci] add shared-memory feature traversing for zenoh-shm dependants * rename ZSliceShm to ZSliceShmMut * - fix build - support SharedMemoryClient sharing * [skip ci] - support SharedMemoryClient sharing - add way to build SharedMemoryClientStorage from slice of clients * [skip ci] - add way to build SharedMemoryClientStorage from slice of clients AND default client set * - remove ZSliceMut API * - stabby fixed for 1.72 - build with zenoh's mainline toolchain 1.72 * fix doctests * - ZSliceShm * Support ZSliceShm and ZSliceShmMut in Payload * - optimize ZSlicBuffer trait - add full ZSliceShm support to Payload - add partial ZSliceShmMut support to Payload - remove a lot of unnecessary code * fix code format errors * - SHM buffer API evolution - Payload API for SHM buffers * [skip ci] polish Payload API for SHM * move SHM Buffer API to separate "slice"module * Improve SHM Buffer API concept * Update payload.rs test * fixes after merge * build fixes * - fix recursion error for in SHM buffer API - add som docs - fix Payload test causing stack overflow :) * - implement trait API for SHM buffers - extend SHM buf Payload API test - add missing DerefMut to zsliceshmmut * Fix merge * Rework Serialize trait * Impl &mut T for Serialize/Deserialize. Fix valgrind CI. * Update commons/zenoh-shm/src/lib.rs * Revert wrong change on log * Update zenoh/src/bytes.rs * Fix use * Fix use * Review fixes * fix recursive call * Update commons/zenoh-shm/src/api/provider/types.rs * unstable for shm slice traits * Add more #[zenoh_macros::unstable_doc] * SHM establishment reorg * add missing ztimeout! 
in tests --------- Co-authored-by: yuanyuyuan Co-authored-by: Mahmoud Mazouz Co-authored-by: Luca Cominardi --- .config/nextest.toml | 8 +- .github/workflows/ci.yml | 9 +- Cargo.lock | 132 ++- Cargo.toml | 4 + .../src/queryable_get/bin/z_queryable_get.rs | 9 +- commons/zenoh-buffers/src/zslice.rs | 42 +- commons/zenoh-codec/Cargo.toml | 3 +- commons/zenoh-codec/src/core/shm.rs | 134 ++- commons/zenoh-codec/src/transport/init.rs | 24 +- commons/zenoh-codec/src/transport/open.rs | 24 +- commons/zenoh-codec/src/zenoh/err.rs | 21 + commons/zenoh-codec/tests/codec.rs | 15 +- commons/zenoh-macros/src/lib.rs | 13 +- commons/zenoh-protocol/src/transport/init.rs | 7 + commons/zenoh-protocol/src/transport/open.rs | 26 +- commons/zenoh-protocol/src/zenoh/err.rs | 15 + commons/zenoh-shm/Cargo.toml | 22 +- commons/zenoh-shm/src/api/client/mod.rs | 16 + .../src/api/client/shared_memory_client.rs | 31 + .../src/api/client/shared_memory_segment.rs | 29 + .../zenoh-shm/src/api/client_storage/mod.rs | 163 ++++ commons/zenoh-shm/src/api/common/mod.rs | 15 + commons/zenoh-shm/src/api/common/types.rs | 27 + commons/zenoh-shm/src/api/mod.rs | 20 + .../src/api/protocol_implementations/mod.rs | 15 + .../api/protocol_implementations/posix/mod.rs | 19 + .../posix/posix_shared_memory_client.rs | 39 + .../posix_shared_memory_provider_backend.rs | 286 ++++++ .../posix/posix_shared_memory_segment.rs | 47 + .../posix/protocol_id.rs | 19 + commons/zenoh-shm/src/api/provider/chunk.rs | 53 + commons/zenoh-shm/src/api/provider/mod.rs | 18 + .../api/provider/shared_memory_provider.rs | 916 ++++++++++++++++++ .../shared_memory_provider_backend.rs | 52 + commons/zenoh-shm/src/api/provider/types.rs | 173 ++++ commons/zenoh-shm/src/api/slice/mod.rs | 17 + commons/zenoh-shm/src/api/slice/traits.rs | 24 + commons/zenoh-shm/src/api/slice/zsliceshm.rs | 172 ++++ .../zenoh-shm/src/api/slice/zsliceshmmut.rs | 189 ++++ .../src/header/allocated_descriptor.rs | 26 + commons/zenoh-shm/src/header/chunk_header.rs | 28 + commons/zenoh-shm/src/header/descriptor.rs | 63 ++ commons/zenoh-shm/src/header/mod.rs | 23 + commons/zenoh-shm/src/header/segment.rs | 40 + commons/zenoh-shm/src/header/storage.rs | 87 ++ commons/zenoh-shm/src/header/subscription.rs | 61 ++ commons/zenoh-shm/src/lib.rs | 516 +++------- commons/zenoh-shm/src/posix_shm/array.rs | 124 +++ commons/zenoh-shm/src/posix_shm/mod.rs | 16 + commons/zenoh-shm/src/posix_shm/segment.rs | 127 +++ commons/zenoh-shm/src/reader.rs | 147 +++ .../src/watchdog/allocated_watchdog.rs | 35 + commons/zenoh-shm/src/watchdog/confirmator.rs | 192 ++++ commons/zenoh-shm/src/watchdog/descriptor.rs | 116 +++ commons/zenoh-shm/src/watchdog/mod.rs | 24 + .../zenoh-shm/src/watchdog/periodic_task.rs | 100 ++ commons/zenoh-shm/src/watchdog/segment.rs | 41 + commons/zenoh-shm/src/watchdog/storage.rs | 76 ++ commons/zenoh-shm/src/watchdog/validator.rs | 102 ++ commons/zenoh-shm/tests/common/mod.rs | 105 ++ commons/zenoh-shm/tests/header.rs | 130 +++ commons/zenoh-shm/tests/periodic_task.rs | 172 ++++ commons/zenoh-shm/tests/posix_array.rs | 161 +++ commons/zenoh-shm/tests/posix_segment.rs | 136 +++ commons/zenoh-shm/tests/posix_shm_provider.rs | 117 +++ commons/zenoh-shm/tests/watchdog.rs | 311 ++++++ commons/zenoh-sync/src/object_pool.rs | 8 +- examples/Cargo.toml | 28 +- examples/examples/z_alloc_shm.rs | 136 +++ examples/examples/z_ping_shm.rs | 147 +++ examples/examples/z_pong.rs | 7 +- examples/examples/z_pub_shm.rs | 103 +- examples/examples/z_pub_shm_thr.rs | 54 +- examples/examples/z_sub_shm.rs | 6 +- 
io/zenoh-transport/Cargo.toml | 1 + io/zenoh-transport/src/common/batch.rs | 6 +- io/zenoh-transport/src/manager.rs | 42 +- io/zenoh-transport/src/multicast/link.rs | 6 +- io/zenoh-transport/src/multicast/manager.rs | 7 - io/zenoh-transport/src/multicast/mod.rs | 2 - io/zenoh-transport/src/multicast/rx.rs | 2 +- io/zenoh-transport/src/multicast/shm.rs | 44 - io/zenoh-transport/src/multicast/transport.rs | 12 + io/zenoh-transport/src/multicast/tx.rs | 10 +- io/zenoh-transport/src/shm.rs | 316 +++--- .../src/unicast/establishment/accept.rs | 82 +- .../src/unicast/establishment/ext/shm.rs | 331 ++++--- .../src/unicast/establishment/open.rs | 79 +- io/zenoh-transport/src/unicast/link.rs | 6 +- .../src/unicast/lowlatency/rx.rs | 4 +- .../src/unicast/lowlatency/transport.rs | 2 +- .../src/unicast/lowlatency/tx.rs | 10 +- io/zenoh-transport/src/unicast/manager.rs | 28 +- io/zenoh-transport/src/unicast/mod.rs | 4 +- .../src/unicast/shared_memory_unicast.rs | 57 -- .../src/unicast/universal/link.rs | 2 +- .../src/unicast/universal/rx.rs | 4 +- .../src/unicast/universal/transport.rs | 2 +- .../src/unicast/universal/tx.rs | 10 +- .../tests/multicast_compression.rs | 30 +- .../tests/multicast_transport.rs | 30 +- .../tests/unicast_authenticator.rs | 25 +- .../tests/unicast_compression.rs | 18 +- .../tests/unicast_concurrent.rs | 20 +- .../tests/unicast_defragmentation.rs | 5 +- .../tests/unicast_intermittent.rs | 23 +- io/zenoh-transport/tests/unicast_multilink.rs | 26 +- io/zenoh-transport/tests/unicast_openclose.rs | 2 +- .../tests/unicast_priorities.rs | 5 +- io/zenoh-transport/tests/unicast_shm.rs | 91 +- .../tests/unicast_simultaneous.rs | 4 +- io/zenoh-transport/tests/unicast_transport.rs | 9 +- zenoh-ext/Cargo.toml | 3 + zenoh/Cargo.toml | 1 + zenoh/src/bytes.rs | 838 +++++++++++++--- zenoh/src/encoding.rs | 12 +- zenoh/src/lib.rs | 35 +- zenoh/src/net/runtime/mod.rs | 62 +- zenoh/src/publication.rs | 8 +- zenoh/src/queryable.rs | 8 +- zenoh/src/sample/builder.rs | 6 +- zenoh/src/sample/mod.rs | 18 +- zenoh/src/session.rs | 23 +- zenoh/src/value.rs | 6 +- zenoh/tests/events.rs | 18 +- zenoh/tests/payload.rs | 97 ++ zenoh/tests/routing.rs | 12 +- zenoh/tests/shm.rs | 204 ++++ 128 files changed, 7727 insertions(+), 1394 deletions(-) create mode 100644 commons/zenoh-shm/src/api/client/mod.rs create mode 100644 commons/zenoh-shm/src/api/client/shared_memory_client.rs create mode 100644 commons/zenoh-shm/src/api/client/shared_memory_segment.rs create mode 100644 commons/zenoh-shm/src/api/client_storage/mod.rs create mode 100644 commons/zenoh-shm/src/api/common/mod.rs create mode 100644 commons/zenoh-shm/src/api/common/types.rs create mode 100644 commons/zenoh-shm/src/api/mod.rs create mode 100644 commons/zenoh-shm/src/api/protocol_implementations/mod.rs create mode 100644 commons/zenoh-shm/src/api/protocol_implementations/posix/mod.rs create mode 100644 commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_client.rs create mode 100644 commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_provider_backend.rs create mode 100644 commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_segment.rs create mode 100644 commons/zenoh-shm/src/api/protocol_implementations/posix/protocol_id.rs create mode 100644 commons/zenoh-shm/src/api/provider/chunk.rs create mode 100644 commons/zenoh-shm/src/api/provider/mod.rs create mode 100644 commons/zenoh-shm/src/api/provider/shared_memory_provider.rs create mode 100644 
commons/zenoh-shm/src/api/provider/shared_memory_provider_backend.rs create mode 100644 commons/zenoh-shm/src/api/provider/types.rs create mode 100644 commons/zenoh-shm/src/api/slice/mod.rs create mode 100644 commons/zenoh-shm/src/api/slice/traits.rs create mode 100644 commons/zenoh-shm/src/api/slice/zsliceshm.rs create mode 100644 commons/zenoh-shm/src/api/slice/zsliceshmmut.rs create mode 100644 commons/zenoh-shm/src/header/allocated_descriptor.rs create mode 100644 commons/zenoh-shm/src/header/chunk_header.rs create mode 100644 commons/zenoh-shm/src/header/descriptor.rs create mode 100644 commons/zenoh-shm/src/header/mod.rs create mode 100644 commons/zenoh-shm/src/header/segment.rs create mode 100644 commons/zenoh-shm/src/header/storage.rs create mode 100644 commons/zenoh-shm/src/header/subscription.rs create mode 100644 commons/zenoh-shm/src/posix_shm/array.rs create mode 100644 commons/zenoh-shm/src/posix_shm/mod.rs create mode 100644 commons/zenoh-shm/src/posix_shm/segment.rs create mode 100644 commons/zenoh-shm/src/reader.rs create mode 100644 commons/zenoh-shm/src/watchdog/allocated_watchdog.rs create mode 100644 commons/zenoh-shm/src/watchdog/confirmator.rs create mode 100644 commons/zenoh-shm/src/watchdog/descriptor.rs create mode 100644 commons/zenoh-shm/src/watchdog/mod.rs create mode 100644 commons/zenoh-shm/src/watchdog/periodic_task.rs create mode 100644 commons/zenoh-shm/src/watchdog/segment.rs create mode 100644 commons/zenoh-shm/src/watchdog/storage.rs create mode 100644 commons/zenoh-shm/src/watchdog/validator.rs create mode 100644 commons/zenoh-shm/tests/common/mod.rs create mode 100644 commons/zenoh-shm/tests/header.rs create mode 100644 commons/zenoh-shm/tests/periodic_task.rs create mode 100644 commons/zenoh-shm/tests/posix_array.rs create mode 100644 commons/zenoh-shm/tests/posix_segment.rs create mode 100644 commons/zenoh-shm/tests/posix_shm_provider.rs create mode 100644 commons/zenoh-shm/tests/watchdog.rs create mode 100644 examples/examples/z_alloc_shm.rs create mode 100644 examples/examples/z_ping_shm.rs delete mode 100644 io/zenoh-transport/src/multicast/shm.rs delete mode 100644 io/zenoh-transport/src/unicast/shared_memory_unicast.rs create mode 100644 zenoh/tests/payload.rs create mode 100644 zenoh/tests/shm.rs diff --git a/.config/nextest.toml b/.config/nextest.toml index aa2c3ac37b..79e299f524 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -8,9 +8,15 @@ slow-timeout = { period = "60s", terminate-after = 2 } filter = """ test(=zenoh_session_unicast) | test(=zenoh_session_multicast) | +test(=zenoh_unicity_p2p) | +test(=zenoh_unicity_brokered) | test(=transport_tcp_intermittent) | test(=transport_tcp_intermittent_for_lowlatency_transport) | -test(=three_node_combination) +test(=three_node_combination) | +test(=watchdog_alloc_concurrent) | +test(=header_check_memory_concurrent) | +test(=header_link_concurrent) | +test(=header_link_failure_concurrent) """ threads-required = 'num-cpus' slow-timeout = { period = "60s", terminate-after = 6 } diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8ac80b747c..b28ea827c0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -52,6 +52,9 @@ jobs: - name: Clippy unstable targets run: cargo +stable clippy --all-targets --features unstable -- --deny warnings + - name: Clippy shared memory without unstable + run: cargo +stable clippy --all-targets --features shared-memory -- --deny warnings + - name: Clippy all features if: ${{ matrix.os == 'ubuntu-latest' || matrix.os == 
'macOS-latest' }} run: cargo +stable clippy --all-targets --all-features -- --deny warnings @@ -92,8 +95,12 @@ jobs: run: cargo nextest run --exclude zenoh-examples --exclude zenoh-plugin-example --workspace - name: Run tests with SHM + if: ${{ matrix.os == 'macOS-latest' || matrix.os == 'windows-latest' }} + run: cargo nextest run -F shared-memory -F unstable -E 'not (test(test_default_features))' --exclude zenoh-examples --exclude zenoh-plugin-example --workspace + + - name: Run tests with SHM + unixpipe if: ${{ matrix.os == 'ubuntu-latest' }} - run: cargo nextest run -F shared-memory -F transport_unixpipe -p zenoh-transport + run: cargo nextest run -F shared-memory -F unstable -F transport_unixpipe -E 'not (test(test_default_features))' --exclude zenoh-examples --exclude zenoh-plugin-example --workspace - name: Check for feature leaks if: ${{ matrix.os == 'ubuntu-latest' }} diff --git a/Cargo.lock b/Cargo.lock index 8dd1450361..d009eb94cc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1948,6 +1948,15 @@ dependencies = [ "scopeguard", ] +[[package]] +name = "lockfree" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74ee94b5ad113c7cb98c5a040f783d0952ee4fe100993881d1673c2cb002dd23" +dependencies = [ + "owned-alloc", +] + [[package]] name = "log" version = "0.4.20" @@ -2295,9 +2304,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "oorandom" @@ -2376,6 +2385,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "owned-alloc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30fceb411f9a12ff9222c5f824026be368ff15dc2f13468d850c7d3f502205d6" + [[package]] name = "parking" version = "2.1.0" @@ -2691,6 +2706,15 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "proc-macro-crate" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" +dependencies = [ + "toml_edit", +] + [[package]] name = "proc-macro-hack" version = "0.5.20+deprecated" @@ -3234,6 +3258,12 @@ dependencies = [ "untrusted 0.9.0", ] +[[package]] +name = "rustversion" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" + [[package]] name = "ryu" version = "1.0.15" @@ -3529,6 +3559,12 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "sha2-const-stable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f179d4e11094a893b82fff208f74d448a7512f99f5a0acbd5c679b705f83ed9" + [[package]] name = "sha3" version = "0.10.8" @@ -3680,6 +3716,42 @@ dependencies = [ "der", ] +[[package]] +name = "stabby" +version = "4.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ec04c5825384722310b6a1fd83023bee0bfdc838f7aa3069f0a59e10203836b" +dependencies = [ + "lazy_static", + "rustversion", + "stabby-abi", +] 
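Patch 276 above introduces the shared-memory provider/client API described in its message: a SharedMemoryProvider driven by a pluggable SharedMemoryProviderBackend (with a POSIX implementation), ZSliceShm/ZSliceShmMut buffer types, and watchdog/generation based buffer validation. The stabby, lockfree and thread-priority packages being added to Cargo.lock here are dependencies of that subsystem. The sketch below only illustrates the intended allocation flow: the module paths follow the files added by this patch, but the builder and allocation method names (builder, with_size, protocol_id, backend, alloc_layout, alloc, res) are assumptions; the authoritative version is the z_alloc_shm example added by this patch.

// Illustrative allocation flow only; method names are assumptions (see
// examples/examples/z_alloc_shm.rs from this patch for the real API).
use zenoh_shm::api::protocol_implementations::posix::{
    posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend,
    protocol_id::POSIX_PROTOCOL_ID,
};
use zenoh_shm::api::provider::shared_memory_provider::SharedMemoryProviderBuilder;

fn alloc_sketch() {
    // 1. Create a POSIX backend backed by one shared-memory segment (size in bytes, assumed).
    let backend = PosixSharedMemoryProviderBackend::builder()
        .with_size(65536)
        .unwrap()
        .res()
        .unwrap();

    // 2. Bind the backend to a provider under the POSIX protocol id.
    let provider = SharedMemoryProviderBuilder::builder()
        .protocol_id::<POSIX_PROTOCOL_ID>()
        .backend(backend)
        .res();

    // 3. Allocate a mutable SHM buffer (ZSliceShmMut), write into it, and hand it to a
    //    publisher as a regular payload; receivers observe it as an immutable ZSliceShm
    //    whose validity is tracked through the watchdog and generation counters.
    let mut sbuf = provider
        .alloc_layout()
        .size(1024)
        .res()
        .unwrap()
        .alloc()
        .res()
        .unwrap();
    sbuf[0..4].copy_from_slice(&[0, 1, 2, 3]);
}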
+ +[[package]] +name = "stabby-abi" +version = "4.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "976322da1deb6cc64a8406fd24378b840b1962acaac1978a993131c3838d81b3" +dependencies = [ + "libc", + "rustversion", + "sha2-const-stable", + "stabby-macros", +] + +[[package]] +name = "stabby-macros" +version = "4.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "736712a13ab37b1fa6e073831efca751bbcb31033af4d7308bd5d9d605939183" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "rand 0.8.5", + "syn 1.0.109", +] + [[package]] name = "standback" version = "0.2.17" @@ -3917,6 +3989,20 @@ dependencies = [ "syn 2.0.52", ] +[[package]] +name = "thread-priority" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b72cb4958060ee2d9540cef68bb3871fd1e547037772c7fe7650d5d1cbec53b3" +dependencies = [ + "bitflags 1.3.2", + "cfg-if 1.0.0", + "libc", + "log", + "rustversion", + "winapi", +] + [[package]] name = "thread_local" version = "1.1.8" @@ -4173,6 +4259,23 @@ dependencies = [ "vsock", ] +[[package]] +name = "toml_datetime" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" + +[[package]] +name = "toml_edit" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" +dependencies = [ + "indexmap", + "toml_datetime", + "winnow", +] + [[package]] name = "tower-service" version = "0.3.2" @@ -4911,6 +5014,15 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +[[package]] +name = "winnow" +version = "0.5.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.50.0" @@ -5105,7 +5217,6 @@ dependencies = [ "zenoh", "zenoh-collections", "zenoh-ext", - "zenoh-shm", "zenoh-util", ] @@ -5539,11 +5650,26 @@ dependencies = [ name = "zenoh-shm" version = "0.11.0-dev" dependencies = [ + "async-trait", + "bincode", + "crc", + "lazy_static", + "libc", + "lockfree", + "num-traits", + "num_cpus", + "rand 0.8.5", "serde", "shared_memory", + "stabby", + "thread-priority", + "tokio", "tracing", "zenoh-buffers", + "zenoh-core", + "zenoh-macros", "zenoh-result", + "zenoh-shm", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 7d5e230e4c..f00f625d0b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -112,9 +112,11 @@ lazy_static = "1.4.0" libc = "0.2.139" libloading = "0.8" tracing = "0.1" +lockfree = "0.5" lz4_flex = "0.11" nix = { version = "0.27", features = ["fs"] } num_cpus = "1.16.0" +num-traits = { version = "0.2.17", default-features = false } ordered-float = "4.1.1" panic-message = "0.3.0" paste = "1.0.12" @@ -146,6 +148,7 @@ serde_cbor = "0.11.2" serde_json = "1.0.114" serde-pickle = "1.1.1" serde_yaml = "0.9.19" +stabby = "4.0.5" sha3 = "0.10.6" shared_memory = "0.12.4" shellexpand = "3.0.0" @@ -159,6 +162,7 @@ tokio-util = "0.7.10" tokio-tungstenite = "0.21" tokio-rustls = "0.25.0" # tokio-vsock = see: io/zenoh-links/zenoh-link-vsock/Cargo.toml (workspaces does not support platform dependent dependencies) +thread-priority = "0.15" console-subscriber = "0.2" typenum = "1.16.0" uhlc = { version = 
"0.7.0", default-features = false } # Default features are disabled due to usage in no_std crates diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs index 676db8f7d0..2121d0ea34 100644 --- a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -22,22 +22,19 @@ async fn main() { let _z = zenoh_runtime::ZRuntimePoolGuard; - let queryable_key_expr = KeyExpr::try_from("test/valgrind/data").unwrap(); + let queryable_key_expr = keyexpr::new("test/valgrind/data").unwrap(); let get_selector = Selector::try_from("test/valgrind/**").unwrap(); println!("Declaring Queryable on '{queryable_key_expr}'..."); let queryable_session = zenoh::open(Config::default()).res().await.unwrap(); let _queryable = queryable_session - .declare_queryable(&queryable_key_expr.clone()) + .declare_queryable(queryable_key_expr) .callback(move |query| { println!(">> Handling query '{}'", query.selector()); let queryable_key_expr = queryable_key_expr.clone(); zenoh_runtime::ZRuntime::Application.block_in_place(async move { query - .reply( - queryable_key_expr, - query.value().unwrap().payload().clone(), - ) + .reply(queryable_key_expr, query.value().unwrap().payload().clone()) .res() .await .unwrap(); diff --git a/commons/zenoh-buffers/src/zslice.rs b/commons/zenoh-buffers/src/zslice.rs index 05c77cac7d..60dbdab5e1 100644 --- a/commons/zenoh-buffers/src/zslice.rs +++ b/commons/zenoh-buffers/src/zslice.rs @@ -28,46 +28,52 @@ use core::{ /*************************************/ /* ZSLICE BUFFER */ /*************************************/ -pub trait ZSliceBuffer: Send + Sync + fmt::Debug { +pub trait ZSliceBuffer: Any + Send + Sync + fmt::Debug { fn as_slice(&self) -> &[u8]; - fn as_mut_slice(&mut self) -> &mut [u8]; fn as_any(&self) -> &dyn Any; + fn as_any_mut(&mut self) -> &mut dyn Any; } impl ZSliceBuffer for Vec { fn as_slice(&self) -> &[u8] { self.as_ref() } - fn as_mut_slice(&mut self) -> &mut [u8] { - self.as_mut() - } + fn as_any(&self) -> &dyn Any { self } + + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } } impl ZSliceBuffer for Box<[u8]> { fn as_slice(&self) -> &[u8] { self.as_ref() } - fn as_mut_slice(&mut self) -> &mut [u8] { - self.as_mut() - } + fn as_any(&self) -> &dyn Any { self } + + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } } impl ZSliceBuffer for [u8; N] { fn as_slice(&self) -> &[u8] { self.as_ref() } - fn as_mut_slice(&mut self) -> &mut [u8] { - self.as_mut() - } + fn as_any(&self) -> &dyn Any { self } + + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } } /*************************************/ @@ -140,6 +146,15 @@ impl ZSlice { self.buf.as_any().downcast_ref::() } + #[inline] + #[must_use] + pub fn downcast_mut(&mut self) -> Option<&mut T> + where + T: Any, + { + Arc::get_mut(&mut self.buf).and_then(|val| val.as_any_mut().downcast_mut::()) + } + #[inline] #[must_use] pub const fn range(&self) -> Range { @@ -424,8 +439,9 @@ mod tests { assert_eq!(buf.as_slice(), zslice.as_slice()); let range = zslice.range(); - let mbuf = Arc::get_mut(&mut zslice.buf).unwrap(); - mbuf.as_mut_slice()[range][..buf.len()].clone_from_slice(&buf[..]); + let mut_slice = zslice.downcast_mut::>().unwrap(); + + mut_slice[range][..buf.len()].clone_from_slice(&buf[..]); assert_eq!(buf.as_slice(), zslice.as_slice()); } diff --git a/commons/zenoh-codec/Cargo.toml b/commons/zenoh-codec/Cargo.toml index 57fa34a4ab..209a4c698d 100644 --- 
a/commons/zenoh-codec/Cargo.toml +++ b/commons/zenoh-codec/Cargo.toml @@ -39,7 +39,8 @@ std = [ shared-memory = [ "std", "zenoh-shm", - "zenoh-protocol/shared-memory" + "zenoh-protocol/shared-memory", + "zenoh-buffers/shared-memory" ] [dependencies] diff --git a/commons/zenoh-codec/src/core/shm.rs b/commons/zenoh-codec/src/core/shm.rs index 69c5c59ce0..2548e4ed14 100644 --- a/commons/zenoh-codec/src/core/shm.rs +++ b/commons/zenoh-codec/src/core/shm.rs @@ -16,7 +16,50 @@ use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; -use zenoh_shm::SharedMemoryBufInfo; +use zenoh_shm::{ + api::provider::chunk::ChunkDescriptor, header::descriptor::HeaderDescriptor, + watchdog::descriptor::Descriptor, SharedMemoryBufInfo, +}; + +impl WCodec<&Descriptor, &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: &Descriptor) -> Self::Output { + self.write(&mut *writer, x.id)?; + self.write(&mut *writer, x.index_and_bitpos)?; + Ok(()) + } +} + +impl WCodec<&HeaderDescriptor, &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: &HeaderDescriptor) -> Self::Output { + self.write(&mut *writer, x.id)?; + self.write(&mut *writer, x.index)?; + Ok(()) + } +} + +impl WCodec<&ChunkDescriptor, &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: &ChunkDescriptor) -> Self::Output { + self.write(&mut *writer, x.segment)?; + self.write(&mut *writer, x.chunk)?; + self.write(&mut *writer, x.len)?; + Ok(()) + } +} impl WCodec<&SharedMemoryBufInfo, &mut W> for Zenoh080 where @@ -26,20 +69,74 @@ where fn write(self, writer: &mut W, x: &SharedMemoryBufInfo) -> Self::Output { let SharedMemoryBufInfo { - offset, - length, - shm_manager, - kind, + data_descriptor, + shm_protocol, + data_len, + watchdog_descriptor, + header_descriptor, + generation, } = x; - self.write(&mut *writer, offset)?; - self.write(&mut *writer, length)?; - self.write(&mut *writer, shm_manager.as_str())?; - self.write(&mut *writer, kind)?; + self.write(&mut *writer, data_descriptor)?; + self.write(&mut *writer, shm_protocol)?; + self.write(&mut *writer, data_len)?; + self.write(&mut *writer, watchdog_descriptor)?; + self.write(&mut *writer, header_descriptor)?; + self.write(&mut *writer, generation)?; Ok(()) } } +impl RCodec for Zenoh080 +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let id = self.read(&mut *reader)?; + let index_and_bitpos = self.read(&mut *reader)?; + + Ok(Descriptor { + id, + index_and_bitpos, + }) + } +} + +impl RCodec for Zenoh080 +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let id = self.read(&mut *reader)?; + let index = self.read(&mut *reader)?; + + Ok(HeaderDescriptor { id, index }) + } +} + +impl RCodec for Zenoh080 +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let segment = self.read(&mut *reader)?; + let chunk = self.read(&mut *reader)?; + let len = self.read(&mut *reader)?; + + Ok(ChunkDescriptor { + segment, + chunk, + len, + }) + } +} + impl RCodec for Zenoh080 where R: Reader, @@ -47,12 +144,21 @@ where type Error = DidntRead; fn read(self, reader: &mut R) -> Result { - let offset: usize = self.read(&mut *reader)?; - let length: usize = self.read(&mut *reader)?; - let shm_manager: String = self.read(&mut *reader)?; - let kind: u8 = 
self.read(&mut *reader)?; + let data_descriptor = self.read(&mut *reader)?; + let shm_protocol = self.read(&mut *reader)?; + let data_len = self.read(&mut *reader)?; + let watchdog_descriptor = self.read(&mut *reader)?; + let header_descriptor = self.read(&mut *reader)?; + let generation = self.read(&mut *reader)?; - let shm_info = SharedMemoryBufInfo::new(offset, length, shm_manager, kind); + let shm_info = SharedMemoryBufInfo::new( + data_descriptor, + shm_protocol, + data_len, + watchdog_descriptor, + header_descriptor, + generation, + ); Ok(shm_info) } } diff --git a/commons/zenoh-codec/src/transport/init.rs b/commons/zenoh-codec/src/transport/init.rs index d3a92165ea..fec9f07afd 100644 --- a/commons/zenoh-codec/src/transport/init.rs +++ b/commons/zenoh-codec/src/transport/init.rs @@ -44,6 +44,7 @@ where resolution, batch_size, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -57,11 +58,16 @@ where header |= flag::S; } let mut n_exts = (ext_qos.is_some() as u8) - + (ext_shm.is_some() as u8) + (ext_auth.is_some() as u8) + (ext_mlink.is_some() as u8) + (ext_lowlatency.is_some() as u8) + (ext_compression.is_some() as u8); + + #[cfg(feature = "shared-memory")] + { + n_exts += ext_shm.is_some() as u8; + } + if n_exts != 0 { header |= flag::Z; } @@ -91,6 +97,7 @@ where n_exts -= 1; self.write(&mut *writer, (qos, n_exts != 0))?; } + #[cfg(feature = "shared-memory")] if let Some(shm) = ext_shm.as_ref() { n_exts -= 1; self.write(&mut *writer, (shm, n_exts != 0))?; @@ -165,6 +172,7 @@ where // Extensions let mut ext_qos = None; + #[cfg(feature = "shared-memory")] let mut ext_shm = None; let mut ext_auth = None; let mut ext_mlink = None; @@ -181,6 +189,7 @@ where ext_qos = Some(q); has_ext = ext; } + #[cfg(feature = "shared-memory")] ext::Shm::ID => { let (s, ext): (ext::Shm, bool) = eodec.read(&mut *reader)?; ext_shm = Some(s); @@ -219,6 +228,7 @@ where resolution, batch_size, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -244,6 +254,7 @@ where batch_size, cookie, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -257,11 +268,16 @@ where header |= flag::S; } let mut n_exts = (ext_qos.is_some() as u8) - + (ext_shm.is_some() as u8) + (ext_auth.is_some() as u8) + (ext_mlink.is_some() as u8) + (ext_lowlatency.is_some() as u8) + (ext_compression.is_some() as u8); + + #[cfg(feature = "shared-memory")] + { + n_exts += ext_shm.is_some() as u8; + } + if n_exts != 0 { header |= flag::Z; } @@ -294,6 +310,7 @@ where n_exts -= 1; self.write(&mut *writer, (qos, n_exts != 0))?; } + #[cfg(feature = "shared-memory")] if let Some(shm) = ext_shm.as_ref() { n_exts -= 1; self.write(&mut *writer, (shm, n_exts != 0))?; @@ -371,6 +388,7 @@ where // Extensions let mut ext_qos = None; + #[cfg(feature = "shared-memory")] let mut ext_shm = None; let mut ext_auth = None; let mut ext_mlink = None; @@ -387,6 +405,7 @@ where ext_qos = Some(q); has_ext = ext; } + #[cfg(feature = "shared-memory")] ext::Shm::ID => { let (s, ext): (ext::Shm, bool) = eodec.read(&mut *reader)?; ext_shm = Some(s); @@ -426,6 +445,7 @@ where batch_size, cookie, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, diff --git a/commons/zenoh-codec/src/transport/open.rs b/commons/zenoh-codec/src/transport/open.rs index f895942ea1..d539526715 100644 --- a/commons/zenoh-codec/src/transport/open.rs +++ b/commons/zenoh-codec/src/transport/open.rs @@ -40,6 +40,7 @@ where initial_sn, cookie, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, 
ext_auth, ext_mlink, @@ -53,11 +54,16 @@ where header |= flag::T; } let mut n_exts = (ext_qos.is_some() as u8) - + (ext_shm.is_some() as u8) + (ext_auth.is_some() as u8) + (ext_mlink.is_some() as u8) + (ext_lowlatency.is_some() as u8) + (ext_compression.is_some() as u8); + + #[cfg(feature = "shared-memory")] + { + n_exts += ext_shm.is_some() as u8; + } + if n_exts != 0 { header |= flag::Z; } @@ -77,6 +83,7 @@ where n_exts -= 1; self.write(&mut *writer, (qos, n_exts != 0))?; } + #[cfg(feature = "shared-memory")] if let Some(shm) = ext_shm.as_ref() { n_exts -= 1; self.write(&mut *writer, (shm, n_exts != 0))?; @@ -138,6 +145,7 @@ where // Extensions let mut ext_qos = None; + #[cfg(feature = "shared-memory")] let mut ext_shm = None; let mut ext_auth = None; let mut ext_mlink = None; @@ -154,6 +162,7 @@ where ext_qos = Some(q); has_ext = ext; } + #[cfg(feature = "shared-memory")] ext::Shm::ID => { let (s, ext): (ext::Shm, bool) = eodec.read(&mut *reader)?; ext_shm = Some(s); @@ -190,6 +199,7 @@ where initial_sn, cookie, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -211,6 +221,7 @@ where lease, initial_sn, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -226,11 +237,16 @@ where header |= flag::T; } let mut n_exts = (ext_qos.is_some() as u8) - + (ext_shm.is_some() as u8) + (ext_auth.is_some() as u8) + (ext_mlink.is_some() as u8) + (ext_lowlatency.is_some() as u8) + (ext_compression.is_some() as u8); + + #[cfg(feature = "shared-memory")] + { + n_exts += ext_shm.is_some() as u8; + } + if n_exts != 0 { header |= flag::Z; } @@ -249,6 +265,7 @@ where n_exts -= 1; self.write(&mut *writer, (qos, n_exts != 0))?; } + #[cfg(feature = "shared-memory")] if let Some(shm) = ext_shm.as_ref() { n_exts -= 1; self.write(&mut *writer, (shm, n_exts != 0))?; @@ -309,6 +326,7 @@ where // Extensions let mut ext_qos = None; + #[cfg(feature = "shared-memory")] let mut ext_shm = None; let mut ext_auth = None; let mut ext_mlink = None; @@ -325,6 +343,7 @@ where ext_qos = Some(q); has_ext = ext; } + #[cfg(feature = "shared-memory")] ext::Shm::ID => { let (s, ext): (ext::Shm, bool) = eodec.read(&mut *reader)?; ext_shm = Some(s); @@ -360,6 +379,7 @@ where lease, initial_sn, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, diff --git a/commons/zenoh-codec/src/zenoh/err.rs b/commons/zenoh-codec/src/zenoh/err.rs index b459f67b3f..5291645bf0 100644 --- a/commons/zenoh-codec/src/zenoh/err.rs +++ b/commons/zenoh-codec/src/zenoh/err.rs @@ -37,6 +37,8 @@ where let Err { encoding, ext_sinfo, + #[cfg(feature = "shared-memory")] + ext_shm, ext_unknown, payload, } = x; @@ -47,6 +49,10 @@ where header |= flag::E; } let mut n_exts = (ext_sinfo.is_some() as u8) + (ext_unknown.len() as u8); + #[cfg(feature = "shared-memory")] + { + n_exts += ext_shm.is_some() as u8; + } if n_exts != 0 { header |= flag::Z; } @@ -62,6 +68,11 @@ where n_exts -= 1; self.write(&mut *writer, (sinfo, n_exts != 0))?; } + #[cfg(feature = "shared-memory")] + if let Some(eshm) = ext_shm.as_ref() { + n_exts -= 1; + self.write(&mut *writer, (eshm, n_exts != 0))?; + } for u in ext_unknown.iter() { n_exts -= 1; self.write(&mut *writer, (u, n_exts != 0))?; @@ -107,6 +118,8 @@ where // Extensions let mut ext_sinfo: Option = None; + #[cfg(feature = "shared-memory")] + let mut ext_shm: Option = None; let mut ext_unknown = Vec::new(); let mut has_ext = imsg::has_flag(self.header, flag::Z); @@ -119,6 +132,12 @@ where ext_sinfo = Some(s); has_ext = ext; } + #[cfg(feature = "shared-memory")] + 
ext::Shm::ID => { + let (s, ext): (ext::ShmType, bool) = eodec.read(&mut *reader)?; + ext_shm = Some(s); + has_ext = ext; + } _ => { let (u, ext) = extension::read(reader, "Err", ext)?; ext_unknown.push(u); @@ -134,6 +153,8 @@ where Ok(Err { encoding, ext_sinfo, + #[cfg(feature = "shared-memory")] + ext_shm, ext_unknown, payload, }) diff --git a/commons/zenoh-codec/tests/codec.rs b/commons/zenoh-codec/tests/codec.rs index e6f6500843..e9b8140f21 100644 --- a/commons/zenoh-codec/tests/codec.rs +++ b/commons/zenoh-codec/tests/codec.rs @@ -360,15 +360,24 @@ fn codec_encoding() { #[cfg(feature = "shared-memory")] #[test] fn codec_shm_info() { - use zenoh_shm::SharedMemoryBufInfo; + use zenoh_shm::api::provider::chunk::ChunkDescriptor; + use zenoh_shm::header::descriptor::HeaderDescriptor; + use zenoh_shm::{watchdog::descriptor::Descriptor, SharedMemoryBufInfo}; run!(SharedMemoryBufInfo, { let mut rng = rand::thread_rng(); - let len = rng.gen_range(0..16); SharedMemoryBufInfo::new( + ChunkDescriptor::new(rng.gen(), rng.gen(), rng.gen()), rng.gen(), rng.gen(), - Alphanumeric.sample_string(&mut rng, len), + Descriptor { + id: rng.gen(), + index_and_bitpos: rng.gen(), + }, + HeaderDescriptor { + id: rng.gen(), + index: rng.gen(), + }, rng.gen(), ) }); diff --git a/commons/zenoh-macros/src/lib.rs b/commons/zenoh-macros/src/lib.rs index b77dffeba0..71184d4245 100644 --- a/commons/zenoh-macros/src/lib.rs +++ b/commons/zenoh-macros/src/lib.rs @@ -60,10 +60,9 @@ pub fn rustc_version_release(_tokens: TokenStream) -> TokenStream { } #[proc_macro_attribute] -pub fn unstable(_attr: TokenStream, item: TokenStream) -> TokenStream { +pub fn unstable_doc(_attr: TokenStream, item: TokenStream) -> TokenStream { let item = proc_macro2::TokenStream::from(item); TokenStream::from(quote! { - #[cfg(feature = "unstable")] ///

/// 🔬 /// This API has been marked as unstable: it works as advertised, but we may change it in a future release. @@ -74,6 +73,16 @@ pub fn unstable(_attr: TokenStream, item: TokenStream) -> TokenStream { }) } +#[proc_macro_attribute] +pub fn unstable(_attr: TokenStream, item: TokenStream) -> TokenStream { + let item = proc_macro2::TokenStream::from(item); + TokenStream::from(quote! { + #[cfg(feature = "unstable")] + #[zenoh_macros::unstable_doc] + #item + }) +} + fn keformat_support(source: &str) -> proc_macro2::TokenStream { let format = match KeFormat::new(&source) { Ok(format) => format, diff --git a/commons/zenoh-protocol/src/transport/init.rs b/commons/zenoh-protocol/src/transport/init.rs index de517a353c..7e86d17af2 100644 --- a/commons/zenoh-protocol/src/transport/init.rs +++ b/commons/zenoh-protocol/src/transport/init.rs @@ -114,6 +114,7 @@ pub struct InitSyn { pub resolution: Resolution, pub batch_size: BatchSize, pub ext_qos: Option, + #[cfg(feature = "shared-memory")] pub ext_shm: Option, pub ext_auth: Option, pub ext_mlink: Option, @@ -134,6 +135,7 @@ pub mod ext { /// # Shm extension /// Used as challenge for probing shared memory capabilities + #[cfg(feature = "shared-memory")] pub type Shm = zextzbuf!(0x2, false); /// # Auth extension @@ -167,6 +169,7 @@ impl InitSyn { let resolution = Resolution::rand(); let batch_size: BatchSize = rng.gen(); let ext_qos = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); + #[cfg(feature = "shared-memory")] let ext_shm = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_auth = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_mlink = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); @@ -180,6 +183,7 @@ impl InitSyn { resolution, batch_size, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -198,6 +202,7 @@ pub struct InitAck { pub batch_size: BatchSize, pub cookie: ZSlice, pub ext_qos: Option, + #[cfg(feature = "shared-memory")] pub ext_shm: Option, pub ext_auth: Option, pub ext_mlink: Option, @@ -224,6 +229,7 @@ impl InitAck { let batch_size: BatchSize = rng.gen(); let cookie = ZSlice::rand(64); let ext_qos = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); + #[cfg(feature = "shared-memory")] let ext_shm = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_auth = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_mlink = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); @@ -238,6 +244,7 @@ impl InitAck { batch_size, cookie, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, diff --git a/commons/zenoh-protocol/src/transport/open.rs b/commons/zenoh-protocol/src/transport/open.rs index d793671b06..c643286193 100644 --- a/commons/zenoh-protocol/src/transport/open.rs +++ b/commons/zenoh-protocol/src/transport/open.rs @@ -78,6 +78,7 @@ pub struct OpenSyn { pub initial_sn: TransportSn, pub cookie: ZSlice, pub ext_qos: Option, + #[cfg(feature = "shared-memory")] pub ext_shm: Option, pub ext_auth: Option, pub ext_mlink: Option, @@ -88,16 +89,22 @@ pub struct OpenSyn { // Extensions pub mod ext { use crate::{ - common::{ZExtUnit, ZExtZ64, ZExtZBuf}, - zextunit, zextz64, zextzbuf, + common::{ZExtUnit, ZExtZBuf}, + zextunit, zextzbuf, }; + #[cfg(feature = "shared-memory")] + use crate::common::ZExtZ64; + #[cfg(feature = "shared-memory")] + use crate::zextz64; + /// # QoS extension /// Used to negotiate the use of QoS pub type QoS = zextunit!(0x1, false); /// # Shm extension /// Used as challenge for probing shared memory capabilities + #[cfg(feature = "shared-memory")] pub type Shm = 
zextz64!(0x2, false); /// # Auth extension @@ -121,9 +128,12 @@ pub mod ext { impl OpenSyn { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::common::{ZExtUnit, ZExtZ64, ZExtZBuf}; + use crate::common::{ZExtUnit, ZExtZBuf}; use rand::Rng; + #[cfg(feature = "shared-memory")] + use crate::common::ZExtZ64; + const MIN: usize = 32; const MAX: usize = 1_024; @@ -138,6 +148,7 @@ impl OpenSyn { let initial_sn: TransportSn = rng.gen(); let cookie = ZSlice::rand(rng.gen_range(MIN..=MAX)); let ext_qos = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); + #[cfg(feature = "shared-memory")] let ext_shm = rng.gen_bool(0.5).then_some(ZExtZ64::rand()); let ext_auth = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_mlink = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); @@ -149,6 +160,7 @@ impl OpenSyn { initial_sn, cookie, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -163,6 +175,7 @@ pub struct OpenAck { pub lease: Duration, pub initial_sn: TransportSn, pub ext_qos: Option, + #[cfg(feature = "shared-memory")] pub ext_shm: Option, pub ext_auth: Option, pub ext_mlink: Option, @@ -173,9 +186,12 @@ pub struct OpenAck { impl OpenAck { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::common::{ZExtUnit, ZExtZ64, ZExtZBuf}; + use crate::common::{ZExtUnit, ZExtZBuf}; use rand::Rng; + #[cfg(feature = "shared-memory")] + use crate::common::ZExtZ64; + let mut rng = rand::thread_rng(); let lease = if rng.gen_bool(0.5) { @@ -186,6 +202,7 @@ impl OpenAck { let initial_sn: TransportSn = rng.gen(); let ext_qos = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); + #[cfg(feature = "shared-memory")] let ext_shm = rng.gen_bool(0.5).then_some(ZExtZ64::rand()); let ext_auth = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_mlink = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); @@ -196,6 +213,7 @@ impl OpenAck { lease, initial_sn, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, diff --git a/commons/zenoh-protocol/src/zenoh/err.rs b/commons/zenoh-protocol/src/zenoh/err.rs index eacbb26596..b6aa5f4954 100644 --- a/commons/zenoh-protocol/src/zenoh/err.rs +++ b/commons/zenoh-protocol/src/zenoh/err.rs @@ -44,17 +44,28 @@ pub mod flag { pub struct Err { pub encoding: Encoding, pub ext_sinfo: Option, + #[cfg(feature = "shared-memory")] + pub ext_shm: Option, pub ext_unknown: Vec, pub payload: ZBuf, } pub mod ext { + #[cfg(feature = "shared-memory")] + use crate::{common::ZExtUnit, zextunit}; use crate::{common::ZExtZBuf, zextzbuf}; /// # SourceInfo extension /// Used to carry additional information about the source of data pub type SourceInfo = zextzbuf!(0x1, false); pub type SourceInfoType = crate::zenoh::ext::SourceInfoType<{ SourceInfo::ID }>; + + /// # Shared Memory extension + /// Used to carry additional information about the shared-memory layour of data + #[cfg(feature = "shared-memory")] + pub type Shm = zextunit!(0x2, true); + #[cfg(feature = "shared-memory")] + pub type ShmType = crate::zenoh::ext::ShmType<{ Shm::ID }>; } impl Err { @@ -66,6 +77,8 @@ impl Err { let encoding = Encoding::rand(); let ext_sinfo = rng.gen_bool(0.5).then_some(ext::SourceInfoType::rand()); + #[cfg(feature = "shared-memory")] + let ext_shm = rng.gen_bool(0.5).then_some(ext::ShmType::rand()); let mut ext_unknown = Vec::new(); for _ in 0..rng.gen_range(0..4) { ext_unknown.push(ZExtUnknown::rand2( @@ -78,6 +91,8 @@ impl Err { Self { encoding, ext_sinfo, + #[cfg(feature = "shared-memory")] + ext_shm, ext_unknown, payload, } diff --git 
a/commons/zenoh-shm/Cargo.toml b/commons/zenoh-shm/Cargo.toml index e6107b9a13..60b9acde1d 100644 --- a/commons/zenoh-shm/Cargo.toml +++ b/commons/zenoh-shm/Cargo.toml @@ -28,9 +28,29 @@ categories = { workspace = true } description = "Internal crate for zenoh." # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[features] +test = ["num_cpus"] + [dependencies] +async-trait = { workspace = true } +bincode = { workspace = true } +crc = { workspace = true } tracing = {workspace = true} serde = { workspace = true, features = ["default"] } shared_memory = { workspace = true } -zenoh-buffers = { workspace = true } +tokio = { workspace = true } zenoh-result = { workspace = true } +zenoh-core = { workspace = true } +zenoh-macros = { workspace = true } +zenoh-buffers = { workspace = true } +rand = { workspace = true } +lazy_static = { workspace = true } +num-traits = { workspace = true } +num_cpus = { workspace = true, optional = true } +thread-priority = { workspace = true } +lockfree = { workspace = true } +stabby = { workspace = true } + +[dev-dependencies] +zenoh-shm = { workspace = true, features = ["test"] } +libc = { workspace = true } diff --git a/commons/zenoh-shm/src/api/client/mod.rs b/commons/zenoh-shm/src/api/client/mod.rs new file mode 100644 index 0000000000..eab20733e7 --- /dev/null +++ b/commons/zenoh-shm/src/api/client/mod.rs @@ -0,0 +1,16 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub mod shared_memory_client; +pub mod shared_memory_segment; diff --git a/commons/zenoh-shm/src/api/client/shared_memory_client.rs b/commons/zenoh-shm/src/api/client/shared_memory_client.rs new file mode 100644 index 0000000000..abc7221300 --- /dev/null +++ b/commons/zenoh-shm/src/api/client/shared_memory_client.rs @@ -0,0 +1,31 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::fmt::Debug; + +use std::sync::Arc; + +use zenoh_result::ZResult; + +use crate::api::common::types::SegmentID; + +use super::shared_memory_segment::SharedMemorySegment; + +/// SharedMemoryClient - client factory implementation for particular shared memory protocol +#[zenoh_macros::unstable_doc] +pub trait SharedMemoryClient: Debug + Send + Sync { + /// Attach to particular shared memory segment + #[zenoh_macros::unstable_doc] + fn attach(&self, segment: SegmentID) -> ZResult>; +} diff --git a/commons/zenoh-shm/src/api/client/shared_memory_segment.rs b/commons/zenoh-shm/src/api/client/shared_memory_segment.rs new file mode 100644 index 0000000000..88eaf8761f --- /dev/null +++ b/commons/zenoh-shm/src/api/client/shared_memory_segment.rs @@ -0,0 +1,29 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::fmt::Debug; + +use std::sync::atomic::AtomicPtr; + +use zenoh_result::ZResult; + +use crate::api::common::types::ChunkID; + +/// SharedMemorySegment - RAII interface to interact with particular shared memory segment +#[zenoh_macros::unstable_doc] +pub trait SharedMemorySegment: Debug + Send + Sync { + /// Obtain the actual region of memory identified by it's id + #[zenoh_macros::unstable_doc] + fn map(&self, chunk: ChunkID) -> ZResult>; +} diff --git a/commons/zenoh-shm/src/api/client_storage/mod.rs b/commons/zenoh-shm/src/api/client_storage/mod.rs new file mode 100644 index 0000000000..0ce1a8af11 --- /dev/null +++ b/commons/zenoh-shm/src/api/client_storage/mod.rs @@ -0,0 +1,163 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use lazy_static::lazy_static; +use std::{ + collections::HashMap, + sync::{Arc, RwLock}, +}; + +use zenoh_result::{bail, ZResult}; + +use crate::api::{ + client::{ + shared_memory_client::SharedMemoryClient, shared_memory_segment::SharedMemorySegment, + }, + common::types::ProtocolID, + protocol_implementations::posix::{ + posix_shared_memory_client::PosixSharedMemoryClient, protocol_id::POSIX_PROTOCOL_ID, + }, +}; + +use crate::reader::{ClientStorage, GlobalDataSegmentID}; + +lazy_static! { + /// A global lazily-initialized SHM client storage. 
+ /// When initialized, contains default client set, + /// see SharedMemoryClientStorage::with_default_client_set + #[zenoh_macros::unstable_doc] + pub static ref GLOBAL_CLIENT_STORAGE: Arc = Arc::new( + SharedMemoryClientStorage::builder() + .with_default_client_set() + .build() + ); +} + +/// Builder to create new client storages +#[zenoh_macros::unstable_doc] +pub struct SharedMemoryClientSetBuilder; + +impl SharedMemoryClientSetBuilder { + /// Add client to the storage (without including the default client set) + #[zenoh_macros::unstable_doc] + pub fn with_client( + self, + id: ProtocolID, + client: Arc, + ) -> SharedMemoryClientStorageBuilder { + let clients = HashMap::from([(id, client)]); + SharedMemoryClientStorageBuilder::new(clients) + } + + /// Add list of clients to the storage (without including the default client set) + #[zenoh_macros::unstable_doc] + pub fn with_clients( + self, + clients: &[(ProtocolID, Arc)], + ) -> SharedMemoryClientStorageBuilder { + let clients = clients.iter().cloned().collect(); + SharedMemoryClientStorageBuilder::new(clients) + } + + /// Include default clients + #[zenoh_macros::unstable_doc] + pub fn with_default_client_set(self) -> SharedMemoryClientStorageBuilder { + let clients = HashMap::from([( + POSIX_PROTOCOL_ID, + Arc::new(PosixSharedMemoryClient {}) as Arc, + )]); + SharedMemoryClientStorageBuilder::new(clients) + } +} + +#[zenoh_macros::unstable_doc] +pub struct SharedMemoryClientStorageBuilder { + clients: HashMap>, +} + +impl SharedMemoryClientStorageBuilder { + fn new(clients: HashMap>) -> Self { + Self { clients } + } + + /// Add client to the storage + #[zenoh_macros::unstable_doc] + pub fn with_client( + mut self, + id: ProtocolID, + client: Arc, + ) -> ZResult { + match self.clients.entry(id) { + std::collections::hash_map::Entry::Occupied(occupied) => { + bail!("Client already exists for id {id}: {:?}!", occupied) + } + std::collections::hash_map::Entry::Vacant(vacant) => { + vacant.insert(client as Arc); + Ok(self) + } + } + } + + /// Add list of clients to the storage + #[zenoh_macros::unstable_doc] + pub fn with_clients(mut self, clients: &[(ProtocolID, Arc)]) -> Self { + self.clients.extend(clients.iter().cloned()); + self + } + + /// Build the storage with parameters specified on previous step + #[zenoh_macros::unstable_doc] + pub fn build(self) -> SharedMemoryClientStorage { + SharedMemoryClientStorage::new(self.clients) + } +} + +/// A storage for SHM clients. +/// Runtime or Session constructed with instance of this type gets capabilities to read +/// SHM buffers for Protocols added to this instance. +#[zenoh_macros::unstable_doc] +#[derive(Debug)] +pub struct SharedMemoryClientStorage { + pub(crate) clients: ClientStorage>, + pub(crate) segments: RwLock>>, +} + +impl Eq for SharedMemoryClientStorage {} + +impl PartialEq for SharedMemoryClientStorage { + fn eq(&self, other: &Self) -> bool { + std::ptr::eq(self, other) + } +} + +impl SharedMemoryClientStorage { + /// Get the builder to construct a new storage + #[zenoh_macros::unstable_doc] + pub fn builder() -> SharedMemoryClientSetBuilder { + SharedMemoryClientSetBuilder + } + + /// Get the list of supported SHM protocols. 
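+    ///
+    /// An illustrative sketch using the builder API defined above (nothing here is prescriptive):
+    /// ```ignore
+    /// let storage = SharedMemoryClientStorage::builder()
+    ///     .with_default_client_set()
+    ///     .build();
+    /// // The default client set registers the POSIX client, so POSIX_PROTOCOL_ID is listed.
+    /// let protocols = storage.supported_protocols();
+    /// ```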
+ #[zenoh_macros::unstable_doc] + pub fn supported_protocols(&self) -> Vec { + self.clients.get_clients().keys().copied().collect() + } + + fn new(clients: HashMap>) -> Self { + Self { + clients: ClientStorage::new(clients), + segments: RwLock::default(), + } + } +} diff --git a/commons/zenoh-shm/src/api/common/mod.rs b/commons/zenoh-shm/src/api/common/mod.rs new file mode 100644 index 0000000000..222c7286bf --- /dev/null +++ b/commons/zenoh-shm/src/api/common/mod.rs @@ -0,0 +1,15 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub mod types; diff --git a/commons/zenoh-shm/src/api/common/types.rs b/commons/zenoh-shm/src/api/common/types.rs new file mode 100644 index 0000000000..02e009aff3 --- /dev/null +++ b/commons/zenoh-shm/src/api/common/types.rs @@ -0,0 +1,27 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +/// Unique protocol identifier. +/// Here is a contract: it is up to user to make sure that incompatible SharedMemoryClient +/// and SharedMemoryProviderBackend implementations will never use the same ProtocolID +#[zenoh_macros::unstable_doc] +pub type ProtocolID = u32; + +/// Unique segment identifier +#[zenoh_macros::unstable_doc] +pub type SegmentID = u32; + +/// Chunk id within it's segment +#[zenoh_macros::unstable_doc] +pub type ChunkID = u32; diff --git a/commons/zenoh-shm/src/api/mod.rs b/commons/zenoh-shm/src/api/mod.rs new file mode 100644 index 0000000000..08a5678fa8 --- /dev/null +++ b/commons/zenoh-shm/src/api/mod.rs @@ -0,0 +1,20 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub mod client; +pub mod client_storage; +pub mod common; +pub mod protocol_implementations; +pub mod provider; +pub mod slice; diff --git a/commons/zenoh-shm/src/api/protocol_implementations/mod.rs b/commons/zenoh-shm/src/api/protocol_implementations/mod.rs new file mode 100644 index 0000000000..df92f63536 --- /dev/null +++ b/commons/zenoh-shm/src/api/protocol_implementations/mod.rs @@ -0,0 +1,15 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub mod posix; diff --git a/commons/zenoh-shm/src/api/protocol_implementations/posix/mod.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/mod.rs new file mode 100644 index 0000000000..12c8aba0b6 --- /dev/null +++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/mod.rs @@ -0,0 +1,19 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub mod posix_shared_memory_client; +pub mod posix_shared_memory_provider_backend; +pub mod protocol_id; + +pub(crate) mod posix_shared_memory_segment; diff --git a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_client.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_client.rs new file mode 100644 index 0000000000..0184f50036 --- /dev/null +++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_client.rs @@ -0,0 +1,39 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::sync::Arc; + +use zenoh_result::ZResult; + +use crate::api::{ + client::{ + shared_memory_client::SharedMemoryClient, shared_memory_segment::SharedMemorySegment, + }, + common::types::SegmentID, +}; + +use super::posix_shared_memory_segment::PosixSharedMemorySegment; + +/// Client factory implementation for particular shared memory protocol +#[zenoh_macros::unstable_doc] +#[derive(Debug)] +pub struct PosixSharedMemoryClient; + +impl SharedMemoryClient for PosixSharedMemoryClient { + /// Attach to particular shared memory segment + #[zenoh_macros::unstable_doc] + fn attach(&self, segment: SegmentID) -> ZResult> { + Ok(Arc::new(PosixSharedMemorySegment::open(segment)?)) + } +} diff --git a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_provider_backend.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_provider_backend.rs new file mode 100644 index 0000000000..89c1b91387 --- /dev/null +++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_provider_backend.rs @@ -0,0 +1,286 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{ + borrow::Borrow, + cmp, + collections::BinaryHeap, + sync::{ + atomic::{AtomicPtr, AtomicUsize, Ordering}, + Mutex, + }, +}; + +use zenoh_core::zlock; +use zenoh_result::ZResult; + +use crate::api::{ + common::types::ChunkID, + provider::{ + chunk::{AllocatedChunk, ChunkDescriptor}, + shared_memory_provider_backend::SharedMemoryProviderBackend, + types::{AllocAlignment, ChunkAllocResult, MemoryLayout, ZAllocError}, + }, +}; + +use super::posix_shared_memory_segment::PosixSharedMemorySegment; + +// TODO: MIN_FREE_CHUNK_SIZE limitation is made to reduce memory fragmentation and lower +// the CPU time needed to defragment() - that's reasonable, and there is additional thing here: +// our SHM\zerocopy functionality outperforms common buffer transmission only starting from 1K +// buffer size. In other words, there should be some minimal size threshold reasonable to use with +// SHM - and it would be good to synchronize this threshold with MIN_FREE_CHUNK_SIZE limitation! +const MIN_FREE_CHUNK_SIZE: usize = 1_024; + +#[derive(Eq, Copy, Clone, Debug)] +struct Chunk { + offset: ChunkID, + size: usize, +} + +impl Ord for Chunk { + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.size.cmp(&other.size) + } +} + +impl PartialOrd for Chunk { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl PartialEq for Chunk { + fn eq(&self, other: &Self) -> bool { + self.size == other.size + } +} + +/// Builder to create posix SHM provider +#[zenoh_macros::unstable_doc] +pub struct PosixSharedMemoryProviderBackendBuilder; + +impl PosixSharedMemoryProviderBackendBuilder { + /// Use existing layout + #[zenoh_macros::unstable_doc] + pub fn with_layout>( + self, + layout: Layout, + ) -> LayoutedPosixSharedMemoryProviderBackendBuilder { + LayoutedPosixSharedMemoryProviderBackendBuilder { layout } + } + + /// Construct layout in-place using arguments + #[zenoh_macros::unstable_doc] + pub fn with_layout_args( + self, + size: usize, + alignment: AllocAlignment, + ) -> ZResult> { + let layout = MemoryLayout::new(size, alignment)?; + Ok(LayoutedPosixSharedMemoryProviderBackendBuilder { layout }) + } + + /// Construct layout in-place from size (default alignment will be used) + #[zenoh_macros::unstable_doc] + pub fn with_size( + self, + size: usize, + ) -> ZResult> { + let layout = MemoryLayout::new(size, AllocAlignment::default())?; + Ok(LayoutedPosixSharedMemoryProviderBackendBuilder { layout }) + } +} + +#[zenoh_macros::unstable_doc] +pub struct LayoutedPosixSharedMemoryProviderBackendBuilder> { + layout: Layout, +} + +impl> LayoutedPosixSharedMemoryProviderBackendBuilder { + /// try to create PosixSharedMemoryProviderBackend + #[zenoh_macros::unstable_doc] + pub fn res(self) -> ZResult { + PosixSharedMemoryProviderBackend::new(self.layout.borrow()) + } +} + +/// A backend for SharedMemoryProvider based on POSIX shared memory. +/// This is the default general-purpose backed shipped with Zenoh. 
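+///
+/// A construction sketch using the builder types defined above (the `builder()` associated
+/// function is provided just below; the 65536-byte size is purely illustrative):
+/// ```ignore
+/// let backend = PosixSharedMemoryProviderBackend::builder()
+///     .with_size(65536)?
+///     .res()?;
+/// ```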
+#[zenoh_macros::unstable_doc] +pub struct PosixSharedMemoryProviderBackend { + available: AtomicUsize, + segment: PosixSharedMemorySegment, + free_list: Mutex>, + alignment: AllocAlignment, +} + +impl PosixSharedMemoryProviderBackend { + /// Get the builder to construct a new instance + #[zenoh_macros::unstable_doc] + pub fn builder() -> PosixSharedMemoryProviderBackendBuilder { + PosixSharedMemoryProviderBackendBuilder + } + + fn new(layout: &MemoryLayout) -> ZResult { + let segment = PosixSharedMemorySegment::create(layout.size())?; + + let mut free_list = BinaryHeap::new(); + let root_chunk = Chunk { + offset: 0, + size: layout.size(), + }; + free_list.push(root_chunk); + + tracing::trace!( + "Created PosixSharedMemoryProviderBackend id {}, layout {:?}", + segment.segment.id(), + layout + ); + + Ok(Self { + available: AtomicUsize::new(layout.size()), + segment, + free_list: Mutex::new(free_list), + alignment: layout.alignment(), + }) + } +} + +impl SharedMemoryProviderBackend for PosixSharedMemoryProviderBackend { + fn alloc(&self, layout: &MemoryLayout) -> ChunkAllocResult { + tracing::trace!("PosixSharedMemoryProviderBackend::alloc({:?})", layout); + + let required_len = layout.size(); + + if self.available.load(Ordering::Relaxed) < required_len { + tracing::trace!( "PosixSharedMemoryProviderBackend does not have sufficient free memory to allocate {:?}, try de-fragmenting!", layout); + return Err(ZAllocError::OutOfMemory); + } + + let mut guard = zlock!(self.free_list); + // The strategy taken is the same for some Unix System V implementations -- as described in the + // famous Bach's book -- in essence keep an ordered list of free slot and always look for the + // biggest as that will give the biggest left-over. + match guard.pop() { + Some(mut chunk) if chunk.size >= required_len => { + // NOTE: don't loose any chunks here, as it will lead to memory leak + tracing::trace!("Allocator selected Chunk ({:?})", &chunk); + if chunk.size - required_len >= MIN_FREE_CHUNK_SIZE { + let free_chunk = Chunk { + offset: chunk.offset + required_len as ChunkID, + size: chunk.size - required_len, + }; + tracing::trace!("The allocation will leave a Free Chunk: {:?}", &free_chunk); + guard.push(free_chunk); + chunk.size = required_len; + } + self.available.fetch_sub(chunk.size, Ordering::Relaxed); + + let descriptor = + ChunkDescriptor::new(self.segment.segment.id(), chunk.offset, chunk.size); + + Ok(AllocatedChunk { + descriptor, + data: unsafe { AtomicPtr::new(self.segment.segment.elem_mut(chunk.offset)) }, + }) + } + Some(c) => { + tracing::trace!("PosixSharedMemoryProviderBackend::alloc({:?}) cannot find any big enough chunk\nSharedMemoryManager::free_list = {:?}", layout, self.free_list); + guard.push(c); + Err(ZAllocError::NeedDefragment) + } + None => { + // NOTE: that should never happen! If this happens - there is a critical bug somewhere around! 
+ let err = format!("PosixSharedMemoryProviderBackend::alloc({:?}) cannot find any available chunk\nSharedMemoryManager::free_list = {:?}", layout, self.free_list); + #[cfg(feature = "test")] + panic!("{err}"); + #[cfg(not(feature = "test"))] + { + tracing::error!("{err}"); + Err(ZAllocError::OutOfMemory) + } + } + } + } + + fn free(&self, chunk: &ChunkDescriptor) { + let free_chunk = Chunk { + offset: chunk.chunk, + size: chunk.len, + }; + self.available.fetch_add(free_chunk.size, Ordering::Relaxed); + zlock!(self.free_list).push(free_chunk); + } + + fn defragment(&self) -> usize { + fn try_merge_adjacent_chunks(a: &Chunk, b: &Chunk) -> Option { + let end_offset = a.offset as usize + a.size; + if end_offset == b.offset as usize { + Some(Chunk { + size: a.size + b.size, + offset: a.offset, + }) + } else { + None + } + } + + let mut largest = 0usize; + + // TODO: optimize this! + // this is an old legacy algo for merging adjacent chunks + // we extract chunks to separate container, sort them by offset and then check each chunk for + // adjacence with neighbour. Adjacent chunks are joined and returned back to temporary container. + // If chunk is not adjacent with it's neighbour, it is placed back to self.free_list + let mut guard = zlock!(self.free_list); + if guard.len() > 1 { + let mut fbs: Vec = guard.drain().collect(); + fbs.sort_by(|x, y| x.offset.cmp(&y.offset)); + let mut current = fbs.remove(0); + let mut i = 0; + let n = fbs.len(); + for chunk in fbs.iter() { + i += 1; + let next = *chunk; + match try_merge_adjacent_chunks(¤t, &next) { + Some(c) => { + current = c; + largest = largest.max(current.size); + if i == n { + guard.push(current) + } + } + None => { + guard.push(current); + if i == n { + guard.push(next); + } else { + current = next; + } + } + } + } + } + largest + } + + fn available(&self) -> usize { + self.available.load(Ordering::Relaxed) + } + + fn layout_for(&self, layout: MemoryLayout) -> ZResult { + layout.extend(self.alignment) + } +} diff --git a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_segment.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_segment.rs new file mode 100644 index 0000000000..eb49d141ca --- /dev/null +++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_segment.rs @@ -0,0 +1,47 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::sync::atomic::AtomicPtr; + +use zenoh_result::ZResult; + +use crate::api::common::types::SegmentID; +use crate::api::{client::shared_memory_segment::SharedMemorySegment, common::types::ChunkID}; + +use crate::posix_shm::array::ArrayInSHM; + +const POSIX_SHM_SEGMENT_PREFIX: &str = "posix_shm_provider_segment"; + +#[derive(Debug)] +pub(crate) struct PosixSharedMemorySegment { + pub(crate) segment: ArrayInSHM, +} + +impl PosixSharedMemorySegment { + pub(crate) fn create(alloc_size: usize) -> ZResult { + let segment = ArrayInSHM::create(alloc_size, POSIX_SHM_SEGMENT_PREFIX)?; + Ok(Self { segment }) + } + + pub(crate) fn open(id: SegmentID) -> ZResult { + let segment = ArrayInSHM::open(id, POSIX_SHM_SEGMENT_PREFIX)?; + Ok(Self { segment }) + } +} + +impl SharedMemorySegment for PosixSharedMemorySegment { + fn map(&self, chunk: ChunkID) -> ZResult> { + unsafe { Ok(AtomicPtr::new(self.segment.elem_mut(chunk))) } + } +} diff --git a/commons/zenoh-shm/src/api/protocol_implementations/posix/protocol_id.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/protocol_id.rs new file mode 100644 index 0000000000..b2eec8d7a5 --- /dev/null +++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/protocol_id.rs @@ -0,0 +1,19 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use crate::api::common::types::ProtocolID; + +/// Protocol identifier to use when creating SharedMemoryProvider +#[zenoh_macros::unstable_doc] +pub const POSIX_PROTOCOL_ID: ProtocolID = 0; diff --git a/commons/zenoh-shm/src/api/provider/chunk.rs b/commons/zenoh-shm/src/api/provider/chunk.rs new file mode 100644 index 0000000000..939758a345 --- /dev/null +++ b/commons/zenoh-shm/src/api/provider/chunk.rs @@ -0,0 +1,53 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::sync::atomic::AtomicPtr; + +use crate::api::common::types::{ChunkID, SegmentID}; + +/// Uniquely identifies the particular chunk within particular segment +#[zenoh_macros::unstable_doc] +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ChunkDescriptor { + pub segment: SegmentID, + pub chunk: ChunkID, + pub len: usize, +} + +impl ChunkDescriptor { + /// Create a new Chunk Descriptor + #[zenoh_macros::unstable_doc] + pub fn new(segment: SegmentID, chunk: ChunkID, len: usize) -> Self { + Self { + segment, + chunk, + len, + } + } +} + +/// A recently-allocated chunk. 
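+/// Typically produced by a SharedMemoryProviderBackend::alloc() implementation (see the POSIX
+/// backend above). A hedged construction sketch, where `segment_id`, `chunk_id`, `len` and
+/// `raw_ptr` are purely illustrative placeholders:
+/// ```ignore
+/// let chunk = AllocatedChunk::new(
+///     ChunkDescriptor::new(segment_id, chunk_id, len),
+///     AtomicPtr::new(raw_ptr),
+/// );
+/// ```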
+#[zenoh_macros::unstable_doc] +pub struct AllocatedChunk { + pub descriptor: ChunkDescriptor, + pub data: AtomicPtr, +} + +impl AllocatedChunk { + /// Create a new Allocated Chunk + #[zenoh_macros::unstable_doc] + pub fn new(descriptor: ChunkDescriptor, data: AtomicPtr) -> Self { + Self { descriptor, data } + } +} diff --git a/commons/zenoh-shm/src/api/provider/mod.rs b/commons/zenoh-shm/src/api/provider/mod.rs new file mode 100644 index 0000000000..a769baacb3 --- /dev/null +++ b/commons/zenoh-shm/src/api/provider/mod.rs @@ -0,0 +1,18 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub mod chunk; +pub mod shared_memory_provider; +pub mod shared_memory_provider_backend; +pub mod types; diff --git a/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs b/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs new file mode 100644 index 0000000000..c3b8128300 --- /dev/null +++ b/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs @@ -0,0 +1,916 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{ + collections::VecDeque, + marker::PhantomData, + sync::{atomic::Ordering, Arc, Mutex}, + time::Duration, +}; + +use async_trait::async_trait; +use zenoh_result::ZResult; + +use crate::{ + api::{common::types::ProtocolID, slice::zsliceshmmut::ZSliceShmMut}, + header::{ + allocated_descriptor::AllocatedHeaderDescriptor, descriptor::HeaderDescriptor, + storage::GLOBAL_HEADER_STORAGE, + }, + watchdog::{ + allocated_watchdog::AllocatedWatchdog, + confirmator::{ConfirmedDescriptor, GLOBAL_CONFIRMATOR}, + descriptor::Descriptor, + storage::GLOBAL_STORAGE, + validator::GLOBAL_VALIDATOR, + }, + SharedMemoryBuf, SharedMemoryBufInfo, +}; + +use super::{ + chunk::{AllocatedChunk, ChunkDescriptor}, + shared_memory_provider_backend::SharedMemoryProviderBackend, + types::{AllocAlignment, BufAllocResult, ChunkAllocResult, MemoryLayout, ZAllocError}, +}; + +#[derive(Debug)] +struct BusyChunk { + descriptor: ChunkDescriptor, + header: AllocatedHeaderDescriptor, + _watchdog: AllocatedWatchdog, +} + +impl BusyChunk { + fn new( + descriptor: ChunkDescriptor, + header: AllocatedHeaderDescriptor, + watchdog: AllocatedWatchdog, + ) -> Self { + Self { + descriptor, + header, + _watchdog: watchdog, + } + } +} + +/// Builder to create AllocLayout +#[zenoh_macros::unstable_doc] +pub struct AllocLayoutBuilder<'a, IDSource, Backend> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + provider: &'a SharedMemoryProvider, +} +impl<'a, IDSource, Backend> AllocLayoutBuilder<'a, IDSource, Backend> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + /// Set size for layout + #[zenoh_macros::unstable_doc] + pub fn size(self, size: usize) -> 
AllocLayoutSizedBuilder<'a, IDSource, Backend> { + AllocLayoutSizedBuilder { + provider: self.provider, + size, + } + } +} + +#[zenoh_macros::unstable_doc] +pub struct AllocLayoutSizedBuilder<'a, IDSource, Backend> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + provider: &'a SharedMemoryProvider, + size: usize, +} +impl<'a, IDSource, Backend> AllocLayoutSizedBuilder<'a, IDSource, Backend> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + /// Set alignment for layout + #[zenoh_macros::unstable_doc] + pub fn alignment( + self, + alignment: AllocAlignment, + ) -> AllocLayoutAlignedBuilder<'a, IDSource, Backend> { + AllocLayoutAlignedBuilder { + provider: self.provider, + size: self.size, + alignment, + } + } + + /// try to build an allocation layout + #[zenoh_macros::unstable_doc] + pub fn res(self) -> ZResult> { + AllocLayout::new(self.size, AllocAlignment::default(), self.provider) + } +} + +#[zenoh_macros::unstable_doc] +pub struct AllocLayoutAlignedBuilder<'a, IDSource, Backend> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + provider: &'a SharedMemoryProvider, + size: usize, + alignment: AllocAlignment, +} +impl<'a, IDSource, Backend> AllocLayoutAlignedBuilder<'a, IDSource, Backend> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + /// Try to build layout with specified args + #[zenoh_macros::unstable_doc] + pub fn res(self) -> ZResult> { + AllocLayout::new(self.size, self.alignment, self.provider) + } +} + +/// A layout for allocations. +/// This is a pre-calculated layout suitable for making series of similar allocations +/// adopted for particular SharedMemoryProvider +#[zenoh_macros::unstable_doc] +#[derive(Debug)] +pub struct AllocLayout<'a, IDSource, Backend> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + size: usize, + provider_layout: MemoryLayout, + provider: &'a SharedMemoryProvider, +} + +impl<'a, IDSource, Backend> AllocLayout<'a, IDSource, Backend> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + /// Allocate the new buffer with this layout + #[zenoh_macros::unstable_doc] + pub fn alloc(&'a self) -> AllocBuilder<'a, IDSource, Backend> { + AllocBuilder { + layout: self, + _phantom: PhantomData, + } + } + + fn new( + size: usize, + alignment: AllocAlignment, + provider: &'a SharedMemoryProvider, + ) -> ZResult { + // NOTE: Depending on internal implementation, provider's backend might relayout + // the allocations for bigger alignment (ex. 4-byte aligned allocation to 8-bytes aligned) + + // Create layout for specified arguments + let layout = MemoryLayout::new(size, alignment)?; + + // Obtain provider's layout for our layout + let provider_layout = provider.backend.layout_for(layout)?; + + Ok(Self { + size, + provider_layout, + provider, + }) + } +} + +/// Trait for deallocation policies. 
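+/// The implementors below (DeallocOptimal, DeallocYoungest, DeallocEldest) are selected through
+/// the Deallocate allocation policy defined later in this file. A hedged sketch, assuming
+/// `provider` is an already-built SharedMemoryProvider:
+/// ```ignore
+/// let layout = provider.alloc_layout().size(1024).res()?;
+/// // Force-deallocate up to 10 busy chunks (DeallocOptimal by default) until the allocation fits.
+/// let buf = layout.alloc().with_policy::<Deallocate<10>>().res();
+/// ```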
+#[zenoh_macros::unstable_doc] +pub trait ForceDeallocPolicy { + fn dealloc( + provider: &SharedMemoryProvider, + ) -> bool; +} + +/// Try to dealloc optimal (currently eldest+1) chunk +#[zenoh_macros::unstable_doc] +pub struct DeallocOptimal; +impl ForceDeallocPolicy for DeallocOptimal { + fn dealloc( + provider: &SharedMemoryProvider, + ) -> bool { + let mut guard = provider.busy_list.lock().unwrap(); + let chunk_to_dealloc = match guard.remove(1) { + Some(val) => val, + None => match guard.pop_front() { + Some(val) => val, + None => return false, + }, + }; + drop(guard); + + provider.backend.free(&chunk_to_dealloc.descriptor); + true + } +} + +/// Try to dealloc youngest chunk +#[zenoh_macros::unstable_doc] +pub struct DeallocYoungest; +impl ForceDeallocPolicy for DeallocYoungest { + fn dealloc( + provider: &SharedMemoryProvider, + ) -> bool { + match provider.busy_list.lock().unwrap().pop_back() { + Some(val) => { + provider.backend.free(&val.descriptor); + true + } + None => false, + } + } +} + +/// Try to dealloc eldest chunk +#[zenoh_macros::unstable_doc] +pub struct DeallocEldest; +impl ForceDeallocPolicy for DeallocEldest { + fn dealloc( + provider: &SharedMemoryProvider, + ) -> bool { + match provider.busy_list.lock().unwrap().pop_front() { + Some(val) => { + provider.backend.free(&val.descriptor); + true + } + None => false, + } + } +} + +/// Trait for allocation policies +#[zenoh_macros::unstable_doc] +pub trait AllocPolicy { + fn alloc( + layout: &MemoryLayout, + provider: &SharedMemoryProvider, + ) -> ChunkAllocResult; +} + +/// Trait for async allocation policies +#[zenoh_macros::unstable_doc] +#[async_trait] +pub trait AsyncAllocPolicy { + async fn alloc_async< + IDSource: ProtocolIDSource + Send + Sync, + Backend: SharedMemoryProviderBackend + Sync, + >( + layout: &MemoryLayout, + provider: &SharedMemoryProvider, + ) -> ChunkAllocResult; +} + +/// Just try to allocate +#[zenoh_macros::unstable_doc] +pub struct JustAlloc; +impl AllocPolicy for JustAlloc { + fn alloc( + layout: &MemoryLayout, + provider: &SharedMemoryProvider, + ) -> ChunkAllocResult { + provider.backend.alloc(layout) + } +} + +/// Garbage collection policy. +/// Try to reclaim old buffers if allocation failed and allocate again +/// if the largest reclaimed chuk is not smaller than the one required +#[zenoh_macros::unstable_doc] +pub struct GarbageCollect +where + InnerPolicy: AllocPolicy, + AltPolicy: AllocPolicy, +{ + _phantom: PhantomData, + _phantom2: PhantomData, +} +impl AllocPolicy for GarbageCollect +where + InnerPolicy: AllocPolicy, + AltPolicy: AllocPolicy, +{ + fn alloc( + layout: &MemoryLayout, + provider: &SharedMemoryProvider, + ) -> ChunkAllocResult { + let result = InnerPolicy::alloc(layout, provider); + if let Err(ZAllocError::OutOfMemory) = result { + // try to alloc again only if GC managed to reclaim big enough chunk + if provider.garbage_collect() >= layout.size() { + return AltPolicy::alloc(layout, provider); + } + } + result + } +} + +/// Defragmenting policy. 
+/// Try to defragment if allocation failed and allocate again +/// if the largest defragmented chuk is not smaller than the one required +#[zenoh_macros::unstable_doc] +pub struct Defragment +where + InnerPolicy: AllocPolicy, + AltPolicy: AllocPolicy, +{ + _phantom: PhantomData, + _phantom2: PhantomData, +} +impl AllocPolicy for Defragment +where + InnerPolicy: AllocPolicy, + AltPolicy: AllocPolicy, +{ + fn alloc( + layout: &MemoryLayout, + provider: &SharedMemoryProvider, + ) -> ChunkAllocResult { + let result = InnerPolicy::alloc(layout, provider); + if let Err(ZAllocError::NeedDefragment) = result { + // try to alloc again only if big enough chunk was defragmented + if provider.defragment() >= layout.size() { + return AltPolicy::alloc(layout, provider); + } + } + result + } +} + +/// Deallocating policy. +/// Forcely deallocate up to N buffers until allocation succeeds. +#[zenoh_macros::unstable_doc] +pub struct Deallocate< + const N: usize, + InnerPolicy = JustAlloc, + AltPolicy = InnerPolicy, + DeallocatePolicy = DeallocOptimal, +> where + InnerPolicy: AllocPolicy, + AltPolicy: AllocPolicy, + DeallocatePolicy: ForceDeallocPolicy, +{ + _phantom: PhantomData, + _phantom2: PhantomData, + _phantom3: PhantomData, +} +impl AllocPolicy + for Deallocate +where + InnerPolicy: AllocPolicy, + AltPolicy: AllocPolicy, + DeallocatePolicy: ForceDeallocPolicy, +{ + fn alloc( + layout: &MemoryLayout, + provider: &SharedMemoryProvider, + ) -> ChunkAllocResult { + let mut result = InnerPolicy::alloc(layout, provider); + for _ in 0..N { + match result { + Err(ZAllocError::NeedDefragment) | Err(ZAllocError::OutOfMemory) => { + if !DeallocatePolicy::dealloc(provider) { + return result; + } + } + _ => { + return result; + } + } + result = AltPolicy::alloc(layout, provider); + } + result + } +} + +/// Blocking allocation policy. +/// This policy will block until the allocation succeeds. +/// Both sync and async modes available. +#[zenoh_macros::unstable_doc] +pub struct BlockOn +where + InnerPolicy: AllocPolicy, +{ + _phantom: PhantomData, +} +#[async_trait] +impl AsyncAllocPolicy for BlockOn +where + InnerPolicy: AllocPolicy, +{ + async fn alloc_async< + IDSource: ProtocolIDSource + Send + Sync, + Backend: SharedMemoryProviderBackend + Sync, + >( + layout: &MemoryLayout, + provider: &SharedMemoryProvider, + ) -> ChunkAllocResult { + loop { + match InnerPolicy::alloc(layout, provider) { + Err(ZAllocError::NeedDefragment) | Err(ZAllocError::OutOfMemory) => { + // TODO: implement provider's async signalling instead of this! + tokio::time::sleep(Duration::from_millis(1)).await; + } + other_result => { + return other_result; + } + } + } + } +} +impl AllocPolicy for BlockOn +where + InnerPolicy: AllocPolicy, +{ + fn alloc( + layout: &MemoryLayout, + provider: &SharedMemoryProvider, + ) -> ChunkAllocResult { + loop { + match InnerPolicy::alloc(layout, provider) { + Err(ZAllocError::NeedDefragment) | Err(ZAllocError::OutOfMemory) => { + // TODO: implement provider's async signalling instead of this! 
+ std::thread::sleep(Duration::from_millis(1)); + } + other_result => { + return other_result; + } + } + } + } +} + +// TODO: allocator API +/*pub struct ShmAllocator< + 'a, + Policy: AllocPolicy, + IDSource, + Backend: SharedMemoryProviderBackend, +> { + provider: &'a SharedMemoryProvider, + allocations: lockfree::map::Map, SharedMemoryBuf>, + _phantom: PhantomData, +} + +impl<'a, Policy: AllocPolicy, IDSource, Backend: SharedMemoryProviderBackend> + ShmAllocator<'a, Policy, IDSource, Backend> +{ + fn allocate(&self, layout: std::alloc::Layout) -> BufAllocResult { + self.provider + .alloc_layout() + .size(layout.size()) + .alignment(AllocAlignment::new(layout.align() as u32)) + .res()? + .alloc() + .res() + } +} + +unsafe impl<'a, Policy: AllocPolicy, IDSource, Backend: SharedMemoryProviderBackend> + allocator_api2::alloc::Allocator for ShmAllocator<'a, Policy, IDSource, Backend> +{ + fn allocate( + &self, + layout: std::alloc::Layout, + ) -> Result, allocator_api2::alloc::AllocError> { + let allocation = self + .allocate(layout) + .map_err(|_| allocator_api2::alloc::AllocError)?; + + let inner = allocation.buf.load(Ordering::Relaxed); + let ptr = NonNull::new(inner).ok_or(allocator_api2::alloc::AllocError)?; + let sl = unsafe { std::slice::from_raw_parts(inner, 2) }; + let res = NonNull::from(sl); + + self.allocations.insert(ptr, allocation); + Ok(res) + } + + unsafe fn deallocate(&self, ptr: std::ptr::NonNull, _layout: std::alloc::Layout) { + let _ = self.allocations.remove(&ptr); + } +}*/ + +/// Builder for allocations +#[zenoh_macros::unstable_doc] +pub struct AllocBuilder< + 'a, + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, + Policy = JustAlloc, +> { + layout: &'a AllocLayout<'a, IDSource, Backend>, + _phantom: PhantomData, +} + +// Generic impl +impl<'a, IDSource, Backend, Policy> AllocBuilder<'a, IDSource, Backend, Policy> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + /// Set the allocation policy + #[zenoh_macros::unstable_doc] + pub fn with_policy(self) -> AllocBuilder<'a, IDSource, Backend, OtherPolicy> { + AllocBuilder { + layout: self.layout, + _phantom: PhantomData, + } + } +} + +// Alloc policy +impl<'a, IDSource, Backend, Policy> AllocBuilder<'a, IDSource, Backend, Policy> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, + Policy: AllocPolicy, +{ + /// Get the result + #[zenoh_macros::unstable_doc] + pub fn res(self) -> BufAllocResult { + self.layout + .provider + .alloc_inner::(self.layout.size, &self.layout.provider_layout) + } +} + +// Async Alloc policy +impl<'a, IDSource, Backend, Policy> AllocBuilder<'a, IDSource, Backend, Policy> +where + IDSource: ProtocolIDSource + Send + Sync, + Backend: SharedMemoryProviderBackend + Sync, + Policy: AsyncAllocPolicy, +{ + /// Get the async result + #[zenoh_macros::unstable_doc] + pub async fn res_async(self) -> BufAllocResult { + self.layout + .provider + .alloc_inner_async::(self.layout.size, &self.layout.provider_layout) + .await + } +} + +#[zenoh_macros::unstable_doc] +pub struct SharedMemoryProviderBuilder; +impl SharedMemoryProviderBuilder { + /// Get the builder to construct SharedMemoryProvider + #[zenoh_macros::unstable_doc] + pub fn builder() -> Self { + Self + } + + /// Set compile-time-evaluated protocol ID (preferred) + #[zenoh_macros::unstable_doc] + pub fn protocol_id( + self, + ) -> SharedMemoryProviderBuilderID> { + SharedMemoryProviderBuilderID::> { + id: StaticProtocolID, + } + } + + /// Set runtime-evaluated protocol ID + 
#[zenoh_macros::unstable_doc] + pub fn dynamic_protocol_id( + self, + id: ProtocolID, + ) -> SharedMemoryProviderBuilderID { + SharedMemoryProviderBuilderID:: { + id: DynamicProtocolID::new(id), + } + } +} + +#[zenoh_macros::unstable_doc] +pub struct SharedMemoryProviderBuilderID { + id: IDSource, +} +impl SharedMemoryProviderBuilderID { + /// Set the backend + #[zenoh_macros::unstable_doc] + pub fn backend( + self, + backend: Backend, + ) -> SharedMemoryProviderBuilderBackendID { + SharedMemoryProviderBuilderBackendID { + backend, + id: self.id, + } + } +} + +#[zenoh_macros::unstable_doc] +pub struct SharedMemoryProviderBuilderBackendID +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + backend: Backend, + id: IDSource, +} +impl SharedMemoryProviderBuilderBackendID +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + /// build SharedMemoryProvider + #[zenoh_macros::unstable_doc] + pub fn res(self) -> SharedMemoryProvider { + SharedMemoryProvider::new(self.backend, self.id) + } +} + +/// Trait to create ProtocolID sources for SharedMemoryProvider +#[zenoh_macros::unstable_doc] +pub trait ProtocolIDSource { + fn id(&self) -> ProtocolID; +} + +/// Static ProtocolID source. This is a recommended API to set ProtocolID +/// when creating SharedMemoryProvider as the ID value is statically evaluated +/// at compile-time and can be optimized. +#[zenoh_macros::unstable_doc] +#[derive(Default)] +pub struct StaticProtocolID; +impl ProtocolIDSource for StaticProtocolID { + fn id(&self) -> ProtocolID { + ID + } +} + +/// Dynamic ProtocolID source. This is an alternative API to set ProtocolID +/// when creating SharedMemoryProvider for cases where ProtocolID is unknown +/// at compile-time. +#[zenoh_macros::unstable_doc] +pub struct DynamicProtocolID { + id: ProtocolID, +} +impl DynamicProtocolID { + #[zenoh_macros::unstable_doc] + pub fn new(id: ProtocolID) -> Self { + Self { id } + } +} +impl ProtocolIDSource for DynamicProtocolID { + fn id(&self) -> ProtocolID { + self.id + } +} +unsafe impl Send for DynamicProtocolID {} +unsafe impl Sync for DynamicProtocolID {} + +/// A generalized interface for shared memory data sources +#[zenoh_macros::unstable_doc] +#[derive(Debug)] +pub struct SharedMemoryProvider +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + backend: Backend, + busy_list: Mutex>, + id: IDSource, +} + +impl SharedMemoryProvider +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + /// Create layout builder associated with particular SharedMemoryProvider. + /// Layout is a rich interface to make allocations + #[zenoh_macros::unstable_doc] + pub fn alloc_layout(&self) -> AllocLayoutBuilder { + AllocLayoutBuilder { provider: self } + } + + /// Defragment memory + #[zenoh_macros::unstable_doc] + pub fn defragment(&self) -> usize { + self.backend.defragment() + } + + /// Map externally-allocated chunk into ZSliceShmMut. + /// This method is designed to be used with push data sources. + /// Remember that chunk's len may be >= len! 
+ #[zenoh_macros::unstable_doc] + pub fn map(&self, chunk: AllocatedChunk, len: usize) -> ZResult { + // allocate resources for SHM buffer + let (allocated_header, allocated_watchdog, confirmed_watchdog) = Self::alloc_resources()?; + + // wrap everything to SharedMemoryBuf + let wrapped = self.wrap( + chunk, + len, + allocated_header, + allocated_watchdog, + confirmed_watchdog, + ); + Ok(unsafe { ZSliceShmMut::new_unchecked(wrapped) }) + } + + /// Try to collect free chunks. + /// Returns the size of largest collected chunk + #[zenoh_macros::unstable_doc] + pub fn garbage_collect(&self) -> usize { + fn is_free_chunk(chunk: &BusyChunk) -> bool { + let header = chunk.header.descriptor.header(); + if header.refcount.load(Ordering::SeqCst) != 0 { + return header.watchdog_invalidated.load(Ordering::SeqCst); + } + true + } + + tracing::trace!("Running Garbage Collector"); + + let mut largest = 0usize; + let mut guard = self.busy_list.lock().unwrap(); + guard.retain(|maybe_free| { + if is_free_chunk(maybe_free) { + tracing::trace!("Garbage Collecting Chunk: {:?}", maybe_free); + self.backend.free(&maybe_free.descriptor); + largest = largest.max(maybe_free.descriptor.len); + return false; + } + true + }); + drop(guard); + + largest + } + + /// Bytes available for use + #[zenoh_macros::unstable_doc] + pub fn available(&self) -> usize { + self.backend.available() + } +} + +// PRIVATE impls +impl SharedMemoryProvider +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + fn new(backend: Backend, id: IDSource) -> Self { + Self { + backend, + busy_list: Mutex::new(VecDeque::default()), + id, + } + } + + fn alloc_inner(&self, size: usize, layout: &MemoryLayout) -> BufAllocResult + where + Policy: AllocPolicy, + { + // allocate resources for SHM buffer + let (allocated_header, allocated_watchdog, confirmed_watchdog) = Self::alloc_resources()?; + + // allocate data chunk + // Perform actions depending on the Policy + // NOTE: it is necessary to properly map this chunk OR free it if mapping fails! + // Don't loose this chunk as it leads to memory leak at the backend side! + // NOTE: self.backend.alloc(len) returns chunk with len >= required len, + // and it is necessary to handle that properly and pass this len to corresponding free(...) 
+ let chunk = Policy::alloc(layout, self)?; + + // wrap allocated chunk to SharedMemoryBuf + let wrapped = self.wrap( + chunk, + size, + allocated_header, + allocated_watchdog, + confirmed_watchdog, + ); + Ok(unsafe { ZSliceShmMut::new_unchecked(wrapped) }) + } + + fn alloc_resources() -> ZResult<( + AllocatedHeaderDescriptor, + AllocatedWatchdog, + ConfirmedDescriptor, + )> { + // allocate shared header + let allocated_header = GLOBAL_HEADER_STORAGE.allocate_header()?; + + // allocate watchdog + let allocated_watchdog = GLOBAL_STORAGE.allocate_watchdog()?; + + // add watchdog to confirmator + let confirmed_watchdog = GLOBAL_CONFIRMATOR.add_owned(&allocated_watchdog.descriptor)?; + + Ok((allocated_header, allocated_watchdog, confirmed_watchdog)) + } + + fn wrap( + &self, + chunk: AllocatedChunk, + len: usize, + allocated_header: AllocatedHeaderDescriptor, + allocated_watchdog: AllocatedWatchdog, + confirmed_watchdog: ConfirmedDescriptor, + ) -> SharedMemoryBuf { + let header = allocated_header.descriptor.clone(); + let descriptor = Descriptor::from(&allocated_watchdog.descriptor); + + // add watchdog to validator + let c_header = header.clone(); + GLOBAL_VALIDATOR.add( + allocated_watchdog.descriptor.clone(), + Box::new(move || { + c_header + .header() + .watchdog_invalidated + .store(true, Ordering::SeqCst); + }), + ); + + // Create buffer's info + let info = SharedMemoryBufInfo::new( + chunk.descriptor.clone(), + self.id.id(), + len, + descriptor, + HeaderDescriptor::from(&header), + header.header().generation.load(Ordering::SeqCst), + ); + + // Create buffer + let shmb = SharedMemoryBuf { + header, + buf: chunk.data, + info, + watchdog: Arc::new(confirmed_watchdog), + }; + + // Create and store busy chunk + self.busy_list.lock().unwrap().push_back(BusyChunk::new( + chunk.descriptor, + allocated_header, + allocated_watchdog, + )); + + shmb + } +} + +// PRIVATE impls for Sync backend +impl SharedMemoryProvider +where + IDSource: ProtocolIDSource + Send + Sync, + Backend: SharedMemoryProviderBackend + Sync, +{ + async fn alloc_inner_async( + &self, + size: usize, + backend_layout: &MemoryLayout, + ) -> BufAllocResult + where + Policy: AsyncAllocPolicy, + { + // allocate resources for SHM buffer + let (allocated_header, allocated_watchdog, confirmed_watchdog) = Self::alloc_resources()?; + + // allocate data chunk + // Perform actions depending on the Policy + // NOTE: it is necessary to properly map this chunk OR free it if mapping fails! + // Don't loose this chunk as it leads to memory leak at the backend side! + // NOTE: self.backend.alloc(len) returns chunk with len >= required len, + // and it is necessary to handle that properly and pass this len to corresponding free(...) 
+        let chunk = Policy::alloc_async(backend_layout, self).await?;
+
+        // wrap allocated chunk to SharedMemoryBuf
+        let wrapped = self.wrap(
+            chunk,
+            size,
+            allocated_header,
+            allocated_watchdog,
+            confirmed_watchdog,
+        );
+        Ok(unsafe { ZSliceShmMut::new_unchecked(wrapped) })
+    }
+}
diff --git a/commons/zenoh-shm/src/api/provider/shared_memory_provider_backend.rs b/commons/zenoh-shm/src/api/provider/shared_memory_provider_backend.rs
new file mode 100644
index 0000000000..cd15ce3720
--- /dev/null
+++ b/commons/zenoh-shm/src/api/provider/shared_memory_provider_backend.rs
@@ -0,0 +1,52 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+
+use zenoh_result::ZResult;
+
+use super::{
+    chunk::ChunkDescriptor,
+    types::{ChunkAllocResult, MemoryLayout},
+};
+
+/// The provider backend trait
+/// Implement this interface to create a Zenoh-compatible shared memory provider
+#[zenoh_macros::unstable_doc]
+pub trait SharedMemoryProviderBackend {
+    /// Allocate the chunk of desired size.
+    /// If successful, the result's chunk size will be >= len
+    #[zenoh_macros::unstable_doc]
+    fn alloc(&self, layout: &MemoryLayout) -> ChunkAllocResult;
+
+    /// Deallocate the chunk.
+    /// It is guaranteed that chunk's descriptor will correspond to the one returned from alloc(...)
+    #[zenoh_macros::unstable_doc]
+    fn free(&self, chunk: &ChunkDescriptor);
+
+    /// Defragment the memory.
+    /// Should return the size of largest defragmented chunk
+    #[zenoh_macros::unstable_doc]
+    fn defragment(&self) -> usize;
+
+    /// Bytes available for use
+    #[zenoh_macros::unstable_doc]
+    fn available(&self) -> usize;
+
+    /// Check and calculate suitable layout for layout.
+    /// Depending on the implementation, backend may relayout allocations for bigger layouts.
+    /// This method is used to:
+    /// - validate, if the provided layout can be used with this backend
+    /// - adopt the layout for backend capabilities
+    #[zenoh_macros::unstable_doc]
+    fn layout_for(&self, layout: MemoryLayout) -> ZResult<MemoryLayout>;
+}
diff --git a/commons/zenoh-shm/src/api/provider/types.rs b/commons/zenoh-shm/src/api/provider/types.rs
new file mode 100644
index 0000000000..662482f567
--- /dev/null
+++ b/commons/zenoh-shm/src/api/provider/types.rs
@@ -0,0 +1,173 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+
+use std::fmt::Display;
+
+use zenoh_result::{bail, ZResult};
+
+use crate::api::slice::zsliceshmmut::ZSliceShmMut;
+
+use super::chunk::AllocatedChunk;
+
+/// Allocation errors
+///
+/// NeedDefragment: defragmentation needed
+/// OutOfMemory: the provider is out of memory
+/// Other: other error
+#[zenoh_macros::unstable_doc]
+#[derive(Debug)]
+pub enum ZAllocError {
+    NeedDefragment,
+    OutOfMemory,
+    Other(zenoh_result::Error),
+}
+
+impl From<zenoh_result::Error> for ZAllocError {
+    fn from(value: zenoh_result::Error) -> Self {
+        Self::Other(value)
+    }
+}
+
+/// Alignment in powers of 2: 0 == 1-byte alignment, 1 == 2-byte, 2 == 4-byte, 3 == 8-byte etc.
+#[zenoh_macros::unstable_doc]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub struct AllocAlignment {
+    pow: u8,
+}
+
+impl Display for AllocAlignment {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.write_fmt(format_args!("[{}]", self.get_alignment_value()))
+    }
+}
+
+impl Default for AllocAlignment {
+    fn default() -> Self {
+        Self {
+            pow: (std::mem::align_of::<u32>() as f64).log2().round() as u8,
+        }
+    }
+}
+
+impl AllocAlignment {
+    #[zenoh_macros::unstable_doc]
+    pub fn new(pow: u8) -> Self {
+        Self { pow }
+    }
+
+    /// Get alignment in normal units (bytes)
+    #[zenoh_macros::unstable_doc]
+    pub fn get_alignment_value(&self) -> usize {
+        1usize << self.pow
+    }
+
+    /// Align size according to inner alignment.
+    /// This call may extend the size (see the example)
+    /// # Examples
+    ///
+    /// ```
+    /// use zenoh_shm::api::provider::types::AllocAlignment;
+    ///
+    /// let alignment = AllocAlignment::new(2); // 4-byte alignment
+    /// let initial_size: usize = 7;
+    /// let aligned_size = alignment.align_size(initial_size);
+    /// assert_eq!(aligned_size, 8);
+    /// ```
+    #[zenoh_macros::unstable_doc]
+    pub fn align_size(&self, size: usize) -> usize {
+        let alignment = self.get_alignment_value();
+        match size % alignment {
+            0 => size,
+            remainder => size + (alignment - remainder),
+        }
+    }
+}
+
+/// Memory layout representation: alignment and size aligned for this alignment
+#[zenoh_macros::unstable_doc]
+#[derive(Debug)]
+pub struct MemoryLayout {
+    size: usize,
+    alignment: AllocAlignment,
+}
+
+impl Display for MemoryLayout {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.write_fmt(format_args!(
+            "[size={},alignment={}]",
+            self.size, self.alignment
+        ))
+    }
+}
+
+impl MemoryLayout {
+    /// Try to create a new memory layout
+    #[zenoh_macros::unstable_doc]
+    pub fn new(size: usize, alignment: AllocAlignment) -> ZResult<Self> {
+        // size of an allocation must be a multiple of its alignment!
+        match size % alignment.get_alignment_value() {
+            0 => Ok(Self { size, alignment }),
+            _ => bail!("size of an allocation must be a multiple of its alignment!"),
+        }
+    }
+
+    #[zenoh_macros::unstable_doc]
+    pub fn size(&self) -> usize {
+        self.size
+    }
+
+    #[zenoh_macros::unstable_doc]
+    pub fn alignment(&self) -> AllocAlignment {
+        self.alignment
+    }
+
+    /// Realign the layout for new alignment. The alignment must be >= the existing one.
+ /// # Examples + /// + /// ``` + /// use zenoh_shm::api::provider::types::AllocAlignment; + /// use zenoh_shm::api::provider::types::MemoryLayout; + /// + /// // 8 bytes with 4-byte alignment + /// let layout4b = MemoryLayout::new(8, AllocAlignment::new(2)).unwrap(); + /// + /// // Try to realign with 2-byte alignment + /// let layout2b = layout4b.extend(AllocAlignment::new(1)); + /// assert!(layout2b.is_err()); // fails because new alignment must be >= old + /// + /// // Try to realign with 8-byte alignment + /// let layout8b = layout4b.extend(AllocAlignment::new(3)); + /// assert!(layout8b.is_ok()); // ok + /// ``` + #[zenoh_macros::unstable_doc] + pub fn extend(&self, new_alignment: AllocAlignment) -> ZResult { + if self.alignment <= new_alignment { + let new_size = new_alignment.align_size(self.size); + return MemoryLayout::new(new_size, new_alignment); + } + bail!( + "Cannot extend alignment form {} to {}: new alignment must be >= old!", + self.alignment, + new_alignment + ) + } +} + +/// SHM chunk allocation result +#[zenoh_macros::unstable_doc] +pub type ChunkAllocResult = Result; + +/// SHM buffer allocation result +#[zenoh_macros::unstable_doc] +pub type BufAllocResult = Result; diff --git a/commons/zenoh-shm/src/api/slice/mod.rs b/commons/zenoh-shm/src/api/slice/mod.rs new file mode 100644 index 0000000000..59c793f94a --- /dev/null +++ b/commons/zenoh-shm/src/api/slice/mod.rs @@ -0,0 +1,17 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub mod traits; +pub mod zsliceshm; +pub mod zsliceshmmut; diff --git a/commons/zenoh-shm/src/api/slice/traits.rs b/commons/zenoh-shm/src/api/slice/traits.rs new file mode 100644 index 0000000000..9104abc4a1 --- /dev/null +++ b/commons/zenoh-shm/src/api/slice/traits.rs @@ -0,0 +1,24 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::ops::{Deref, DerefMut}; + +#[zenoh_macros::unstable_doc] +pub trait SHMBuf: Deref + AsRef<[u8]> { + #[zenoh_macros::unstable_doc] + fn is_valid(&self) -> bool; +} + +#[zenoh_macros::unstable_doc] +pub trait SHMBufMut: SHMBuf + DerefMut + AsMut<[u8]> {} diff --git a/commons/zenoh-shm/src/api/slice/zsliceshm.rs b/commons/zenoh-shm/src/api/slice/zsliceshm.rs new file mode 100644 index 0000000000..86f4395ebb --- /dev/null +++ b/commons/zenoh-shm/src/api/slice/zsliceshm.rs @@ -0,0 +1,172 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use core::ops::Deref; +use std::{ + borrow::{Borrow, BorrowMut}, + ops::DerefMut, +}; + +use zenoh_buffers::{ZBuf, ZSlice}; + +use crate::SharedMemoryBuf; + +use super::{traits::SHMBuf, zsliceshmmut::zsliceshmmut}; + +/// An immutable SHM slice +#[zenoh_macros::unstable_doc] +#[repr(transparent)] +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ZSliceShm(pub(crate) SharedMemoryBuf); + +impl SHMBuf for ZSliceShm { + fn is_valid(&self) -> bool { + self.0.is_valid() + } +} + +impl PartialEq<&zsliceshm> for ZSliceShm { + fn eq(&self, other: &&zsliceshm) -> bool { + self.0 == other.0 .0 + } +} + +impl Borrow for ZSliceShm { + fn borrow(&self) -> &zsliceshm { + // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // to SharedMemoryBuf type, so it is safe to transmute them in any direction + unsafe { core::mem::transmute(self) } + } +} + +impl BorrowMut for ZSliceShm { + fn borrow_mut(&mut self) -> &mut zsliceshm { + // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // to SharedMemoryBuf type, so it is safe to transmute them in any direction + unsafe { core::mem::transmute(self) } + } +} + +impl Deref for ZSliceShm { + type Target = [u8]; + + fn deref(&self) -> &Self::Target { + self.0.as_ref() + } +} + +impl AsRef<[u8]> for ZSliceShm { + fn as_ref(&self) -> &[u8] { + self + } +} + +impl From for ZSliceShm { + fn from(value: SharedMemoryBuf) -> Self { + Self(value) + } +} + +impl From for ZSlice { + fn from(value: ZSliceShm) -> Self { + value.0.into() + } +} + +impl From for ZBuf { + fn from(value: ZSliceShm) -> Self { + value.0.into() + } +} + +impl TryFrom<&mut ZSliceShm> for &mut zsliceshmmut { + type Error = (); + + fn try_from(value: &mut ZSliceShm) -> Result { + match value.0.is_unique() && value.0.is_valid() { + true => { + // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // to SharedMemoryBuf type, so it is safe to transmute them in any direction + Ok(unsafe { core::mem::transmute(value) }) + } + false => Err(()), + } + } +} + +/// A borrowed immutable SHM slice +#[zenoh_macros::unstable_doc] +#[derive(Debug, PartialEq, Eq)] +#[allow(non_camel_case_types)] +#[repr(transparent)] +pub struct zsliceshm(ZSliceShm); + +impl ToOwned for zsliceshm { + type Owned = ZSliceShm; + + fn to_owned(&self) -> Self::Owned { + self.0.clone() + } +} + +impl PartialEq for &zsliceshm { + fn eq(&self, other: &ZSliceShm) -> bool { + self.0 .0 == other.0 + } +} + +impl Deref for zsliceshm { + type Target = ZSliceShm; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for zsliceshm { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl From<&SharedMemoryBuf> for &zsliceshm { + fn from(value: &SharedMemoryBuf) -> Self { + // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // to SharedMemoryBuf type, so it is safe to transmute them in any direction + unsafe { core::mem::transmute(value) } + } +} + +impl From<&mut SharedMemoryBuf> for &mut zsliceshm { + fn from(value: &mut SharedMemoryBuf) -> Self { + // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // to SharedMemoryBuf type, so it is safe to transmute them in any direction + unsafe { core::mem::transmute(value) } + } +} + +impl TryFrom<&mut zsliceshm> for &mut zsliceshmmut { + type Error = (); + + fn 
try_from(value: &mut zsliceshm) -> Result { + match value.0 .0.is_unique() && value.0 .0.is_valid() { + true => { + // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // to SharedMemoryBuf type, so it is safe to transmute them in any direction + Ok(unsafe { core::mem::transmute(value) }) + } + false => Err(()), + } + } +} diff --git a/commons/zenoh-shm/src/api/slice/zsliceshmmut.rs b/commons/zenoh-shm/src/api/slice/zsliceshmmut.rs new file mode 100644 index 0000000000..62823785da --- /dev/null +++ b/commons/zenoh-shm/src/api/slice/zsliceshmmut.rs @@ -0,0 +1,189 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use core::ops::{Deref, DerefMut}; +use std::borrow::{Borrow, BorrowMut}; + +use zenoh_buffers::{ZBuf, ZSlice}; + +use crate::SharedMemoryBuf; + +use super::{ + traits::{SHMBuf, SHMBufMut}, + zsliceshm::{zsliceshm, ZSliceShm}, +}; + +/// A mutable SHM slice +#[zenoh_macros::unstable_doc] +#[derive(Debug, PartialEq, Eq)] +#[repr(transparent)] +pub struct ZSliceShmMut(SharedMemoryBuf); + +impl SHMBuf for ZSliceShmMut { + fn is_valid(&self) -> bool { + self.0.is_valid() + } +} + +impl SHMBufMut for ZSliceShmMut {} + +impl ZSliceShmMut { + pub(crate) unsafe fn new_unchecked(data: SharedMemoryBuf) -> Self { + Self(data) + } +} + +impl PartialEq for &ZSliceShmMut { + fn eq(&self, other: &zsliceshmmut) -> bool { + self.0 == other.0 .0 + } +} + +impl TryFrom for ZSliceShmMut { + type Error = SharedMemoryBuf; + + fn try_from(value: SharedMemoryBuf) -> Result { + match value.is_unique() && value.is_valid() { + true => Ok(Self(value)), + false => Err(value), + } + } +} + +impl TryFrom for ZSliceShmMut { + type Error = ZSliceShm; + + fn try_from(value: ZSliceShm) -> Result { + match value.0.is_unique() && value.0.is_valid() { + true => Ok(Self(value.0)), + false => Err(value), + } + } +} + +impl Borrow for ZSliceShmMut { + fn borrow(&self) -> &zsliceshm { + // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // to SharedMemoryBuf type, so it is safe to transmute them in any direction + unsafe { core::mem::transmute(self) } + } +} + +impl BorrowMut for ZSliceShmMut { + fn borrow_mut(&mut self) -> &mut zsliceshm { + // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // to SharedMemoryBuf type, so it is safe to transmute them in any direction + unsafe { core::mem::transmute(self) } + } +} + +impl Borrow for ZSliceShmMut { + fn borrow(&self) -> &zsliceshmmut { + // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // to SharedMemoryBuf type, so it is safe to transmute them in any direction + unsafe { core::mem::transmute(self) } + } +} + +impl BorrowMut for ZSliceShmMut { + fn borrow_mut(&mut self) -> &mut zsliceshmmut { + // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // to SharedMemoryBuf type, so it is safe to transmute them in any direction + unsafe { core::mem::transmute(self) } + } +} + +impl Deref for ZSliceShmMut { + type Target = [u8]; + + fn deref(&self) -> &Self::Target { + self.0.as_ref() + } +} 
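// Editor's sketch (not part of the patch): a minimal, hedged illustration of how
// the builder, layout and allocation-policy types introduced by this diff are
// intended to compose, ending with a write through ZSliceShmMut's DerefMut.
// Module paths, the backend parameter and the exact call chain are inferred from
// the file layout and builder code above; treat this as an illustrative sketch,
// not as the crate's documented API.
use zenoh_shm::api::provider::shared_memory_provider::{
    Defragment, GarbageCollect, SharedMemoryProviderBuilder,
};
use zenoh_shm::api::provider::shared_memory_provider_backend::SharedMemoryProviderBackend;

fn alloc_and_write_sketch<Backend: SharedMemoryProviderBackend>(backend: Backend) {
    // Build a provider from a user-supplied backend and a runtime protocol ID
    let provider = SharedMemoryProviderBuilder::builder()
        .dynamic_protocol_id(0)
        .backend(backend)
        .res();

    // Describe the allocation once; the layout can be reused for many allocations
    let layout = provider
        .alloc_layout()
        .size(1024)
        .res()
        .expect("invalid layout");

    // Allocate, letting the policy defragment and garbage-collect before failing
    let mut buf = layout
        .alloc()
        .with_policy::<Defragment<GarbageCollect>>()
        .res()
        .expect("SHM allocation failed");

    // ZSliceShmMut derefs to [u8], so the payload can be written in place
    buf[0..4].copy_from_slice(&[1, 2, 3, 4]);
}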
+ +impl DerefMut for ZSliceShmMut { + fn deref_mut(&mut self) -> &mut Self::Target { + self.0.as_mut() + } +} + +impl AsRef<[u8]> for ZSliceShmMut { + fn as_ref(&self) -> &[u8] { + self + } +} + +impl AsMut<[u8]> for ZSliceShmMut { + fn as_mut(&mut self) -> &mut [u8] { + self + } +} + +impl From for ZSliceShm { + fn from(value: ZSliceShmMut) -> Self { + value.0.into() + } +} + +impl From for ZSlice { + fn from(value: ZSliceShmMut) -> Self { + value.0.into() + } +} + +impl From for ZBuf { + fn from(value: ZSliceShmMut) -> Self { + value.0.into() + } +} + +/// A borrowed mutable SHM slice +#[zenoh_macros::unstable_doc] +#[derive(Debug, PartialEq, Eq)] +#[allow(non_camel_case_types)] +#[repr(transparent)] +pub struct zsliceshmmut(ZSliceShmMut); + +impl PartialEq for &zsliceshmmut { + fn eq(&self, other: &ZSliceShmMut) -> bool { + self.0 .0 == other.0 + } +} + +impl Deref for zsliceshmmut { + type Target = ZSliceShmMut; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for zsliceshmmut { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl TryFrom<&mut SharedMemoryBuf> for &mut zsliceshmmut { + type Error = (); + + fn try_from(value: &mut SharedMemoryBuf) -> Result { + match value.is_unique() && value.is_valid() { + // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // to SharedMemoryBuf type, so it is safe to transmute them in any direction + true => Ok(unsafe { core::mem::transmute(value) }), + false => Err(()), + } + } +} diff --git a/commons/zenoh-shm/src/header/allocated_descriptor.rs b/commons/zenoh-shm/src/header/allocated_descriptor.rs new file mode 100644 index 0000000000..f800683595 --- /dev/null +++ b/commons/zenoh-shm/src/header/allocated_descriptor.rs @@ -0,0 +1,26 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use super::{descriptor::OwnedHeaderDescriptor, storage::GLOBAL_HEADER_STORAGE}; + +#[derive(Debug)] +pub struct AllocatedHeaderDescriptor { + pub descriptor: OwnedHeaderDescriptor, +} + +impl Drop for AllocatedHeaderDescriptor { + fn drop(&mut self) { + GLOBAL_HEADER_STORAGE.reclaim_header(self.descriptor.clone()); + } +} diff --git a/commons/zenoh-shm/src/header/chunk_header.rs b/commons/zenoh-shm/src/header/chunk_header.rs new file mode 100644 index 0000000000..c5eb11bb7c --- /dev/null +++ b/commons/zenoh-shm/src/header/chunk_header.rs @@ -0,0 +1,28 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::sync::atomic::{AtomicBool, AtomicU32}; + +// Chunk header +#[stabby::stabby] +#[derive(Debug)] +pub struct ChunkHeaderType { + /* + TODO: We don't really need 32 bits here, but access to 16-bit felds with 1 byte alignment is less performant on most of the platforms. 
+ We need to bench and select reasonable integer sizes here once we have an implementation to bench + */ + pub refcount: AtomicU32, + pub watchdog_invalidated: AtomicBool, + pub generation: AtomicU32, +} diff --git a/commons/zenoh-shm/src/header/descriptor.rs b/commons/zenoh-shm/src/header/descriptor.rs new file mode 100644 index 0000000000..7700eb90c6 --- /dev/null +++ b/commons/zenoh-shm/src/header/descriptor.rs @@ -0,0 +1,63 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::sync::Arc; + +use super::{chunk_header::ChunkHeaderType, segment::HeaderSegment}; + +pub type HeaderSegmentID = u16; +pub type HeaderIndex = u16; + +#[derive(Clone, Eq, Hash, PartialEq, PartialOrd, Ord, Debug)] +pub struct HeaderDescriptor { + pub id: HeaderSegmentID, + pub index: HeaderIndex, +} + +impl From<&OwnedHeaderDescriptor> for HeaderDescriptor { + fn from(item: &OwnedHeaderDescriptor) -> Self { + let id = item.segment.array.id(); + let index = unsafe { item.segment.array.index(item.header) }; + + Self { id, index } + } +} + +#[derive(Clone)] +pub struct OwnedHeaderDescriptor { + segment: Arc, + header: *const ChunkHeaderType, +} + +unsafe impl Send for OwnedHeaderDescriptor {} +unsafe impl Sync for OwnedHeaderDescriptor {} + +impl OwnedHeaderDescriptor { + pub(crate) fn new(segment: Arc, header: *const ChunkHeaderType) -> Self { + Self { segment, header } + } + + #[inline(always)] + pub fn header(&self) -> &ChunkHeaderType { + unsafe { &(*self.header) } + } +} + +impl std::fmt::Debug for OwnedHeaderDescriptor { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("OwnedHeaderDescriptor") + .field("header", &self.header) + .finish() + } +} diff --git a/commons/zenoh-shm/src/header/mod.rs b/commons/zenoh-shm/src/header/mod.rs new file mode 100644 index 0000000000..84acc86e87 --- /dev/null +++ b/commons/zenoh-shm/src/header/mod.rs @@ -0,0 +1,23 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub mod descriptor; + +tested_crate_module!(storage); +tested_crate_module!(subscription); + +pub(crate) mod allocated_descriptor; +pub(crate) mod chunk_header; + +mod segment; diff --git a/commons/zenoh-shm/src/header/segment.rs b/commons/zenoh-shm/src/header/segment.rs new file mode 100644 index 0000000000..e36e54a233 --- /dev/null +++ b/commons/zenoh-shm/src/header/segment.rs @@ -0,0 +1,40 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use zenoh_result::ZResult; + +use crate::posix_shm::array::ArrayInSHM; + +use super::{ + chunk_header::ChunkHeaderType, + descriptor::{HeaderIndex, HeaderSegmentID}, +}; + +const HEADER_SEGMENT_PREFIX: &str = "header"; + +pub struct HeaderSegment { + pub array: ArrayInSHM, +} + +impl HeaderSegment { + pub fn create(header_count: usize) -> ZResult { + let array = ArrayInSHM::create(header_count, HEADER_SEGMENT_PREFIX)?; + Ok(Self { array }) + } + + pub fn open(id: HeaderSegmentID) -> ZResult { + let array = ArrayInSHM::open(id, HEADER_SEGMENT_PREFIX)?; + Ok(Self { array }) + } +} diff --git a/commons/zenoh-shm/src/header/storage.rs b/commons/zenoh-shm/src/header/storage.rs new file mode 100644 index 0000000000..c09fa83dba --- /dev/null +++ b/commons/zenoh-shm/src/header/storage.rs @@ -0,0 +1,87 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use lazy_static::lazy_static; +use std::{ + collections::LinkedList, + sync::{Arc, Mutex}, +}; + +use zenoh_result::{zerror, ZResult}; + +use super::{ + allocated_descriptor::AllocatedHeaderDescriptor, + descriptor::{HeaderIndex, OwnedHeaderDescriptor}, + segment::HeaderSegment, +}; + +lazy_static! { + pub static ref GLOBAL_HEADER_STORAGE: HeaderStorage = HeaderStorage::new(32768usize).unwrap(); +} + +pub struct HeaderStorage { + available: Arc>>, +} + +impl HeaderStorage { + fn new(initial_header_count: usize) -> ZResult { + let initial_segment = Arc::new(HeaderSegment::create(initial_header_count)?); + let mut initially_available = LinkedList::::default(); + + for index in 0..initial_header_count { + let header = unsafe { initial_segment.array.elem(index as HeaderIndex) }; + let descriptor = OwnedHeaderDescriptor::new(initial_segment.clone(), header); + + // init generation (this is not really necessary, but we do) + descriptor + .header() + .generation + .store(0, std::sync::atomic::Ordering::SeqCst); + + initially_available.push_back(descriptor); + } + + Ok(Self { + available: Arc::new(Mutex::new(initially_available)), + }) + } + + pub fn allocate_header(&self) -> ZResult { + let mut guard = self.available.lock().map_err(|e| zerror!("{e}"))?; + let popped = guard.pop_front(); + drop(guard); + + let descriptor = popped.ok_or_else(|| zerror!("no free headers available"))?; + + //initialize header fields + let header = descriptor.header(); + header + .refcount + .store(1, std::sync::atomic::Ordering::SeqCst); + header + .watchdog_invalidated + .store(false, std::sync::atomic::Ordering::SeqCst); + + Ok(AllocatedHeaderDescriptor { descriptor }) + } + + pub fn reclaim_header(&self, header: OwnedHeaderDescriptor) { + // header deallocated - increment it's generation to invalidate any existing references + header + .header() + .generation + .fetch_add(1, std::sync::atomic::Ordering::SeqCst); + let mut guard = self.available.lock().unwrap(); + guard.push_front(header); + } +} diff --git a/commons/zenoh-shm/src/header/subscription.rs b/commons/zenoh-shm/src/header/subscription.rs new file mode 100644 index 0000000000..49ad170aea --- /dev/null +++ 
b/commons/zenoh-shm/src/header/subscription.rs @@ -0,0 +1,61 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use lazy_static::lazy_static; +use std::{ + collections::BTreeMap, + sync::{Arc, Mutex}, +}; + +use zenoh_result::{zerror, ZResult}; + +use super::{ + descriptor::{HeaderDescriptor, HeaderSegmentID, OwnedHeaderDescriptor}, + segment::HeaderSegment, +}; + +lazy_static! { + pub static ref GLOBAL_HEADER_SUBSCRIPTION: Subscription = Subscription::new(); +} + +pub struct Subscription { + linked_table: Mutex>>, +} + +impl Subscription { + fn new() -> Self { + Self { + linked_table: Mutex::default(), + } + } + + pub fn link(&self, descriptor: &HeaderDescriptor) -> ZResult { + let mut guard = self.linked_table.lock().map_err(|e| zerror!("{e}"))?; + // ensure segment + let segment = match guard.entry(descriptor.id) { + std::collections::btree_map::Entry::Vacant(vacant) => { + let segment = Arc::new(HeaderSegment::open(descriptor.id)?); + vacant.insert(segment.clone()); + segment + } + std::collections::btree_map::Entry::Occupied(occupied) => occupied.get().clone(), + }; + drop(guard); + + // construct owned descriptor + // SAFETY: HeaderDescriptor source guarantees that descriptor.index is valid for segment + let header = unsafe { segment.array.elem(descriptor.index) }; + let owned_descriptor = OwnedHeaderDescriptor::new(segment, header); + Ok(owned_descriptor) + } +} diff --git a/commons/zenoh-shm/src/lib.rs b/commons/zenoh-shm/src/lib.rs index 82f3614380..abcdd558fb 100644 --- a/commons/zenoh-shm/src/lib.rs +++ b/commons/zenoh-shm/src/lib.rs @@ -11,79 +11,82 @@ // Contributors: // ZettaScale Zenoh Team, // -use shared_memory::{Shmem, ShmemConf, ShmemError}; +use api::{common::types::ProtocolID, provider::chunk::ChunkDescriptor}; +use header::descriptor::{HeaderDescriptor, OwnedHeaderDescriptor}; use std::{ any::Any, - cmp, - collections::{binary_heap::BinaryHeap, HashMap}, - fmt, mem, - sync::atomic::{AtomicPtr, AtomicUsize, Ordering}, + sync::{ + atomic::{AtomicPtr, Ordering}, + Arc, + }, }; +use watchdog::{confirmator::ConfirmedDescriptor, descriptor::Descriptor}; use zenoh_buffers::ZSliceBuffer; -use zenoh_result::{zerror, ShmError, ZResult}; -const MIN_FREE_CHUNK_SIZE: usize = 1_024; -const ACCOUNTED_OVERHEAD: usize = 4_096; -const ZENOH_SHM_PREFIX: &str = "zenoh_shm_zid"; - -// Chunk header -type ChunkHeaderType = AtomicUsize; -const CHUNK_HEADER_SIZE: usize = std::mem::size_of::(); - -fn align_addr_at(addr: usize, align: usize) -> usize { - match addr % align { - 0 => addr, - r => addr + (align - r), - } +#[macro_export] +macro_rules! tested_module { + ($module:ident) => { + #[cfg(feature = "test")] + pub mod $module; + #[cfg(not(feature = "test"))] + mod $module; + }; } -#[derive(Eq, Copy, Clone, Debug)] -struct Chunk { - base_addr: *mut u8, - offset: usize, - size: usize, -} - -impl Ord for Chunk { - fn cmp(&self, other: &Self) -> cmp::Ordering { - self.size.cmp(&other.size) - } +#[macro_export] +macro_rules! 
tested_crate_module { + ($module:ident) => { + #[cfg(feature = "test")] + pub mod $module; + #[cfg(not(feature = "test"))] + pub(crate) mod $module; + }; } -impl PartialOrd for Chunk { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl PartialEq for Chunk { - fn eq(&self, other: &Self) -> bool { - self.size == other.size - } -} +pub mod api; +pub mod header; +pub mod posix_shm; +pub mod reader; +pub mod watchdog; /// Informations about a [`SharedMemoryBuf`]. /// /// This that can be serialized and can be used to retrieve the [`SharedMemoryBuf`] in a remote process. #[derive(Clone, Debug, PartialEq, Eq)] pub struct SharedMemoryBufInfo { - /// The index of the beginning of the buffer in the shm segment. - pub offset: usize, - /// The length of the buffer. - pub length: usize, - /// The identifier of the shm manager that manages the shm segment this buffer points to. - pub shm_manager: String, - /// The kind of buffer. - pub kind: u8, + /// The data chunk descriptor + pub data_descriptor: ChunkDescriptor, + /// Protocol identifier for particular SharedMemory implementation + pub shm_protocol: ProtocolID, + /// Actual data length + /// NOTE: data_descriptor's len is >= of this len and describes the actual memory length + /// dedicated in shared memory segment for this particular buffer. + pub data_len: usize, + + /// The watchdog descriptor + pub watchdog_descriptor: Descriptor, + /// The header descriptor + pub header_descriptor: HeaderDescriptor, + /// The generation of the buffer + pub generation: u32, } impl SharedMemoryBufInfo { - pub fn new(offset: usize, length: usize, manager: String, kind: u8) -> SharedMemoryBufInfo { + pub fn new( + data_descriptor: ChunkDescriptor, + shm_protocol: ProtocolID, + data_len: usize, + watchdog_descriptor: Descriptor, + header_descriptor: HeaderDescriptor, + generation: u32, + ) -> SharedMemoryBufInfo { SharedMemoryBufInfo { - offset, - length, - shm_manager: manager, - kind, + data_descriptor, + shm_protocol, + data_len, + watchdog_descriptor, + header_descriptor, + generation, } } } @@ -91,20 +94,27 @@ impl SharedMemoryBufInfo { /// A zenoh buffer in shared memory. 
#[non_exhaustive] pub struct SharedMemoryBuf { - pub rc_ptr: AtomicPtr, - pub buf: AtomicPtr, - pub len: usize, + pub(crate) header: OwnedHeaderDescriptor, + pub(crate) buf: AtomicPtr, pub info: SharedMemoryBufInfo, + pub(crate) watchdog: Arc, +} + +impl PartialEq for SharedMemoryBuf { + fn eq(&self, other: &Self) -> bool { + // currently there is no API to resize an SHM buffer, but it is intended in the future, + // so I add size comparsion here to avoid future bugs :) + self.buf.load(Ordering::Relaxed) == other.buf.load(Ordering::Relaxed) + && self.info.data_len == other.info.data_len + } } +impl Eq for SharedMemoryBuf {} impl std::fmt::Debug for SharedMemoryBuf { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let ptr = self.rc_ptr.load(Ordering::SeqCst); - let rc = unsafe { (*ptr).load(Ordering::SeqCst) }; f.debug_struct("SharedMemoryBuf") - .field("rc", &rc) + .field("header", &self.header) .field("buf", &self.buf) - .field("len", &self.len) .field("info", &self.info) .finish() } @@ -112,44 +122,47 @@ impl std::fmt::Debug for SharedMemoryBuf { impl SharedMemoryBuf { pub fn len(&self) -> usize { - self.len + self.info.data_len } pub fn is_empty(&self) -> bool { self.len() == 0 } - pub fn get_kind(&self) -> u8 { - self.info.kind + fn is_valid(&self) -> bool { + self.header.header().generation.load(Ordering::SeqCst) == self.info.generation } - pub fn set_kind(&mut self, v: u8) { - self.info.kind = v + fn is_unique(&self) -> bool { + self.ref_count() == 1 } - pub fn owner(&self) -> String { - self.info.shm_manager.clone() + pub fn ref_count(&self) -> u32 { + self.header.header().refcount.load(Ordering::SeqCst) } - pub fn ref_count(&self) -> usize { - let rc = self.rc_ptr.load(Ordering::SeqCst); - unsafe { (*rc).load(Ordering::SeqCst) } - } - - pub fn inc_ref_count(&self) { - let rc = self.rc_ptr.load(Ordering::SeqCst); - unsafe { (*rc).fetch_add(1, Ordering::SeqCst) }; + /// Increments buffer's reference count + /// + /// # Safety + /// You should understand what you are doing, as overestimation + /// of the reference counter can lead to memory being stalled until + /// recovered by watchdog subsystem or forcely deallocated + pub unsafe fn inc_ref_count(&self) { + self.header.header().refcount.fetch_add(1, Ordering::SeqCst); } - pub fn dec_ref_count(&self) { - let rc = self.rc_ptr.load(Ordering::SeqCst); - unsafe { (*rc).fetch_sub(1, Ordering::SeqCst) }; + // PRIVATE: + fn as_slice(&self) -> &[u8] { + tracing::trace!( + "SharedMemoryBuf::as_slice() == len = {:?}", + self.info.data_len + ); + let bp = self.buf.load(Ordering::SeqCst); + unsafe { std::slice::from_raw_parts(bp, self.info.data_len) } } - pub fn as_slice(&self) -> &[u8] { - tracing::trace!("SharedMemoryBuf::as_slice() == len = {:?}", self.len); - let bp = self.buf.load(Ordering::SeqCst); - unsafe { std::slice::from_raw_parts(bp, self.len) } + unsafe fn dec_ref_count(&self) { + self.header.header().refcount.fetch_sub(1, Ordering::SeqCst); } /// Gets a mutable slice. @@ -163,342 +176,35 @@ impl SharedMemoryBuf { /// /// In short, whilst this operation is marked as unsafe, you are safe if you can /// guarantee that your in applications only one process at the time will actually write. 
- pub unsafe fn as_mut_slice(&mut self) -> &mut [u8] { + unsafe fn as_mut_slice_inner(&mut self) -> &mut [u8] { let bp = self.buf.load(Ordering::SeqCst); - std::slice::from_raw_parts_mut(bp, self.len) + std::slice::from_raw_parts_mut(bp, self.info.data_len) } } impl Drop for SharedMemoryBuf { fn drop(&mut self) { - self.dec_ref_count(); + // # Safety + // obviouly, we need to decrement refcount when dropping SharedMemoryBuf instance + unsafe { self.dec_ref_count() }; } } impl Clone for SharedMemoryBuf { fn clone(&self) -> Self { - self.inc_ref_count(); - let rc = self.rc_ptr.load(Ordering::SeqCst); + // # Safety + // obviouly, we need to increment refcount when cloning SharedMemoryBuf instance + unsafe { self.inc_ref_count() }; let bp = self.buf.load(Ordering::SeqCst); SharedMemoryBuf { - rc_ptr: AtomicPtr::new(rc), + header: self.header.clone(), buf: AtomicPtr::new(bp), - len: self.len, info: self.info.clone(), + watchdog: self.watchdog.clone(), } } } -/*************************************/ -/* SHARED MEMORY READER */ -/*************************************/ -pub struct SharedMemoryReader { - segments: HashMap, -} - -unsafe impl Send for SharedMemoryReader {} -unsafe impl Sync for SharedMemoryReader {} - -impl SharedMemoryReader { - pub fn new() -> Self { - Self { - segments: HashMap::new(), - } - } - - pub fn connect_map_to_shm(&mut self, info: &SharedMemoryBufInfo) -> ZResult<()> { - match ShmemConf::new().flink(&info.shm_manager).open() { - Ok(shm) => { - self.segments.insert(info.shm_manager.clone(), shm); - Ok(()) - } - Err(e) => { - let e = zerror!( - "Unable to bind shared memory segment {}: {:?}", - info.shm_manager, - e - ); - tracing::trace!("{}", e); - Err(ShmError(e).into()) - } - } - } - - pub fn try_read_shmbuf(&self, info: &SharedMemoryBufInfo) -> ZResult { - // Try read does not increment the reference count as it is assumed - // that the sender of this buffer has incremented for us. - match self.segments.get(&info.shm_manager) { - Some(shm) => { - let base_ptr = shm.as_ptr(); - let rc = unsafe { base_ptr.add(info.offset) as *mut ChunkHeaderType }; - let rc_ptr = AtomicPtr::::new(rc); - let buf = unsafe { base_ptr.add(info.offset + CHUNK_HEADER_SIZE) }; - let shmb = SharedMemoryBuf { - rc_ptr, - buf: AtomicPtr::new(buf), - len: info.length - CHUNK_HEADER_SIZE, - info: info.clone(), - }; - Ok(shmb) - } - None => { - let e = zerror!("Unable to find shared memory segment: {}", info.shm_manager); - tracing::trace!("{}", e); - Err(ShmError(e).into()) - } - } - } - - pub fn read_shmbuf(&mut self, info: &SharedMemoryBufInfo) -> ZResult { - // Read does not increment the reference count as it is assumed - // that the sender of this buffer has incremented for us. - self.try_read_shmbuf(info).or_else(|_| { - self.connect_map_to_shm(info)?; - self.try_read_shmbuf(info) - }) - } -} - -impl Default for SharedMemoryReader { - fn default() -> Self { - Self::new() - } -} - -impl fmt::Debug for SharedMemoryReader { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("SharedMemoryReader").finish()?; - f.debug_list().entries(self.segments.keys()).finish() - } -} - -/// A shared memory segment manager. -/// -/// Allows to access a shared memory segment and reserve some parts of this segment for writting. 
-pub struct SharedMemoryManager { - segment_path: String, - size: usize, - available: usize, - own_segment: Shmem, - free_list: BinaryHeap, - busy_list: Vec, - alignment: usize, -} - -unsafe impl Send for SharedMemoryManager {} - -impl SharedMemoryManager { - /// Creates a new SharedMemoryManager managing allocations of a region of the - /// given size. - pub fn make(id: String, size: usize) -> ZResult { - let mut temp_dir = std::env::temp_dir(); - let file_name: String = format!("{ZENOH_SHM_PREFIX}_{id}"); - temp_dir.push(file_name); - let path: String = temp_dir - .to_str() - .ok_or_else(|| ShmError(zerror!("Unable to parse tmp directory: {:?}", temp_dir)))? - .to_string(); - tracing::trace!("Creating file at: {}", path); - let real_size = size + ACCOUNTED_OVERHEAD; - let shmem = match ShmemConf::new() - .size(real_size) - .flink(path.clone()) - .create() - { - Ok(m) => m, - Err(ShmemError::LinkExists) => { - return Err(ShmError(zerror!( - "Unable to open SharedMemoryManager: SharedMemory already exists" - )) - .into()) - } - Err(e) => { - return Err(ShmError(zerror!("Unable to open SharedMemoryManager: {}", e)).into()) - } - }; - let base_ptr = shmem.as_ptr(); - - let mut free_list = BinaryHeap::new(); - let chunk = Chunk { - base_addr: base_ptr, - offset: 0, - size: real_size, - }; - free_list.push(chunk); - let busy_list = vec![]; - let shm = SharedMemoryManager { - segment_path: path, - size, - available: real_size, - own_segment: shmem, - free_list, - busy_list, - alignment: mem::align_of::(), - }; - tracing::trace!( - "Created SharedMemoryManager for {:?}", - shm.own_segment.as_ptr() - ); - Ok(shm) - } - - fn free_chunk_map_to_shmbuf(&self, chunk: &Chunk) -> SharedMemoryBuf { - let info = SharedMemoryBufInfo { - offset: chunk.offset, - length: chunk.size, - shm_manager: self.segment_path.clone(), - kind: 0, - }; - let rc = chunk.base_addr as *mut ChunkHeaderType; - unsafe { (*rc).store(1, Ordering::SeqCst) }; - let rc_ptr = AtomicPtr::::new(rc); - SharedMemoryBuf { - rc_ptr, - buf: AtomicPtr::::new(unsafe { chunk.base_addr.add(CHUNK_HEADER_SIZE) }), - len: chunk.size - CHUNK_HEADER_SIZE, - info, - } - } - - pub fn alloc(&mut self, len: usize) -> ZResult { - tracing::trace!("SharedMemoryManager::alloc({})", len); - // Always allocate a size that will keep the proper alignment requirements - let required_len = align_addr_at(len + CHUNK_HEADER_SIZE, self.alignment); - if self.available < required_len { - self.garbage_collect(); - } - if self.available >= required_len { - // The strategy taken is the same for some Unix System V implementations -- as described in the - // famous Bach's book -- in essence keep an ordered list of free slot and always look for the - // biggest as that will give the biggest left-over. 
- match self.free_list.pop() { - Some(mut chunk) if chunk.size >= required_len => { - self.available -= required_len; - tracing::trace!("Allocator selected Chunk ({:?})", &chunk); - if chunk.size - required_len >= MIN_FREE_CHUNK_SIZE { - let free_chunk = Chunk { - base_addr: unsafe { chunk.base_addr.add(required_len) }, - offset: chunk.offset + required_len, - size: chunk.size - required_len, - }; - tracing::trace!( - "The allocation will leave a Free Chunk: {:?}", - &free_chunk - ); - self.free_list.push(free_chunk); - } - chunk.size = required_len; - let shm_buf = self.free_chunk_map_to_shmbuf(&chunk); - tracing::trace!("The allocated Chunk is ({:?})", &chunk); - tracing::trace!("Allocated Shared Memory Buffer: {:?}", &shm_buf); - self.busy_list.push(chunk); - Ok(shm_buf) - } - Some(c) => { - self.free_list.push(c); - let e = zerror!("SharedMemoryManager::alloc({}) cannot find any available chunk\nSharedMemoryManager::free_list = {:?}", len, self.free_list); - Err(e.into()) - } - None => { - let e = zerror!("SharedMemoryManager::alloc({}) cannot find any available chunk\nSharedMemoryManager::free_list = {:?}", len, self.free_list); - tracing::trace!("{}", e); - Err(e.into()) - } - } - } else { - let e = zerror!( "SharedMemoryManager does not have sufficient free memory to allocate {} bytes, try de-fragmenting!", len); - tracing::warn!("{}", e); - Err(e.into()) - } - } - - fn is_free_chunk(chunk: &Chunk) -> bool { - let rc_ptr = chunk.base_addr as *mut ChunkHeaderType; - let rc = unsafe { (*rc_ptr).load(Ordering::SeqCst) }; - rc == 0 - } - - fn try_merge_adjacent_chunks(a: &Chunk, b: &Chunk) -> Option { - let end_addr = unsafe { a.base_addr.add(a.size) }; - if end_addr == b.base_addr { - Some(Chunk { - base_addr: a.base_addr, - size: a.size + b.size, - offset: a.offset, - }) - } else { - None - } - } - // Returns the amount of memory that it was able to de-fragment - pub fn defragment(&mut self) -> usize { - if self.free_list.len() > 1 { - let mut fbs: Vec = self.free_list.drain().collect(); - fbs.sort_by(|x, y| x.offset.partial_cmp(&y.offset).unwrap()); - let mut current = fbs.remove(0); - let mut defrag_mem = 0; - let mut i = 0; - let n = fbs.len(); - for chunk in fbs.iter() { - i += 1; - let next = *chunk; - match SharedMemoryManager::try_merge_adjacent_chunks(¤t, &next) { - Some(c) => { - current = c; - defrag_mem += current.size; - if i == n { - self.free_list.push(current) - } - } - None => { - self.free_list.push(current); - if i == n { - self.free_list.push(next); - } else { - current = next; - } - } - } - } - defrag_mem - } else { - 0 - } - } - - /// Returns the amount of memory freed - pub fn garbage_collect(&mut self) -> usize { - tracing::trace!("Running Garbage Collector"); - - let mut freed = 0; - let (free, busy) = self - .busy_list - .iter() - .partition(|&c| SharedMemoryManager::is_free_chunk(c)); - self.busy_list = busy; - - for f in free { - freed += f.size; - tracing::trace!("Garbage Collecting Chunk: {:?}", f); - self.free_list.push(f) - } - self.available += freed; - freed - } -} - -impl fmt::Debug for SharedMemoryManager { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("SharedMemoryManager") - .field("segment_path", &self.segment_path) - .field("size", &self.size) - .field("available", &self.available) - .field("free_list.len", &self.free_list.len()) - .field("busy_list.len", &self.busy_list.len()) - .finish() - } -} - // Buffer impls // - SharedMemoryBuf impl AsRef<[u8]> for SharedMemoryBuf { @@ -509,7 +215,7 @@ impl 
AsRef<[u8]> for SharedMemoryBuf { impl AsMut<[u8]> for SharedMemoryBuf { fn as_mut(&mut self) -> &mut [u8] { - unsafe { self.as_mut_slice() } + unsafe { self.as_mut_slice_inner() } } } @@ -517,10 +223,12 @@ impl ZSliceBuffer for SharedMemoryBuf { fn as_slice(&self) -> &[u8] { self.as_ref() } - fn as_mut_slice(&mut self) -> &mut [u8] { - self.as_mut() - } + fn as_any(&self) -> &dyn Any { self } + + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } } diff --git a/commons/zenoh-shm/src/posix_shm/array.rs b/commons/zenoh-shm/src/posix_shm/array.rs new file mode 100644 index 0000000000..d092c579b5 --- /dev/null +++ b/commons/zenoh-shm/src/posix_shm/array.rs @@ -0,0 +1,124 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{fmt::Display, marker::PhantomData, mem::size_of}; + +use num_traits::{AsPrimitive, PrimInt, Unsigned}; +use stabby::IStable; +use zenoh_result::{bail, ZResult}; + +use super::segment::Segment; + +/// An SHM segment that is intended to be an array of elements of some certain type +#[derive(Debug)] +pub struct ArrayInSHM +where + rand::distributions::Standard: rand::distributions::Distribution, + ID: Clone + Display, +{ + inner: Segment, + _phantom: PhantomData<(Elem, ElemIndex)>, +} + +unsafe impl Sync for ArrayInSHM +where + rand::distributions::Standard: rand::distributions::Distribution, + ID: Clone + Display, +{ +} +unsafe impl Send for ArrayInSHM +where + rand::distributions::Standard: rand::distributions::Distribution, + ID: Clone + Display, +{ +} + +impl ArrayInSHM +where + rand::distributions::Standard: rand::distributions::Distribution, + ID: Clone + Display, + ElemIndex: Unsigned + PrimInt + 'static + AsPrimitive, + Elem: IStable, + isize: AsPrimitive, +{ + // Perform compile time check that Elem is not a ZST in such a way `elem_count` can not panic. + const _S: () = if size_of::() == 0 { + panic!("Elem is a ZST. ZSTs are not allowed as ArrayInSHM generic"); + }; + + pub fn create(elem_count: usize, file_prefix: &str) -> ZResult { + if elem_count == 0 { + bail!("Unable to create SHM array segment of 0 elements") + } + + let max: usize = ElemIndex::max_value().as_(); + if elem_count - 1 > max { + bail!("Unable to create SHM array segment of {elem_count} elements: out of range for ElemIndex!") + } + + let alloc_size = elem_count * size_of::(); + let inner = Segment::create(alloc_size, file_prefix)?; + Ok(Self { + inner, + _phantom: PhantomData, + }) + } + + pub fn open(id: ID, file_prefix: &str) -> ZResult { + let inner = Segment::open(id, file_prefix)?; + Ok(Self { + inner, + _phantom: PhantomData, + }) + } + + pub fn id(&self) -> ID { + self.inner.id() + } + + pub fn elem_count(&self) -> usize { + self.inner.len() / size_of::() + } + + /// # Safety + /// Retrieves const element by it's index. This is safe if the index doesn't go out of underlying array. 
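The `const _S` check above rejects zero-sized element types at compile time by panicking during const evaluation. A standalone sketch of the same guard pattern, using a hypothetical `NotZst` wrapper instead of the real `ArrayInSHM`:

use std::marker::PhantomData;
use std::mem::size_of;

// Hypothetical wrapper that refuses zero-sized element types at compile time.
struct NotZst<T>(PhantomData<T>);

impl<T> NotZst<T> {
    // Evaluated when referenced for a concrete T; a ZST turns this into a
    // compile-time error instead of a run-time surprise.
    const GUARD: () = if size_of::<T>() == 0 {
        panic!("T must not be a zero-sized type");
    };

    fn new() -> Self {
        let () = Self::GUARD; // force evaluation of the associated constant
        NotZst(PhantomData)
    }
}

fn main() {
    let _ok = NotZst::<u32>::new();
    // let _bad = NotZst::<()>::new(); // uncommenting this fails to compile
}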
+ /// Additional assert to check the index validity is added for "test" feature + pub unsafe fn elem(&self, index: ElemIndex) -> *const Elem { + #[cfg(feature = "test")] + assert!(self.inner.len() > index.as_() * size_of::()); + (self.inner.as_ptr() as *const Elem).add(index.as_()) + } + + /// # Safety + /// Retrieves mut element by it's index. This is safe if the index doesn't go out of underlying array. + /// Additional assert to check the index validity is added for "test" feature + pub unsafe fn elem_mut(&self, index: ElemIndex) -> *mut Elem { + #[cfg(feature = "test")] + assert!(self.inner.len() > index.as_() * size_of::()); + (self.inner.as_ptr() as *mut Elem).add(index.as_()) + } + + /// # Safety + /// Calculates element's index. This is safe if the element belongs to underlying array. + /// Additional assert is added for "test" feature + pub unsafe fn index(&self, elem: *const Elem) -> ElemIndex { + let index = elem.offset_from(self.inner.as_ptr() as *const Elem); + #[cfg(feature = "test")] + { + assert!(index >= 0); + assert!(self.inner.len() > index as usize * size_of::()); + } + index.as_() + } +} diff --git a/commons/zenoh-shm/src/posix_shm/mod.rs b/commons/zenoh-shm/src/posix_shm/mod.rs new file mode 100644 index 0000000000..a63b1c9e6d --- /dev/null +++ b/commons/zenoh-shm/src/posix_shm/mod.rs @@ -0,0 +1,16 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub mod array; +tested_crate_module!(segment); diff --git a/commons/zenoh-shm/src/posix_shm/segment.rs b/commons/zenoh-shm/src/posix_shm/segment.rs new file mode 100644 index 0000000000..d987bad7a9 --- /dev/null +++ b/commons/zenoh-shm/src/posix_shm/segment.rs @@ -0,0 +1,127 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
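The `elem`/`elem_mut`/`index` accessors above are plain element-wise pointer arithmetic over the mapped segment. The same round trip, shown on an ordinary heap buffer so the sketch runs without any shared memory:

// Round trip between an element index and an element pointer, as used by
// ArrayInSHM::elem() and ArrayInSHM::index(), demonstrated on a plain Vec.
fn main() {
    let data: Vec<u64> = vec![10, 20, 30, 40];
    let base: *const u64 = data.as_ptr();

    let index: usize = 2;
    // elem(): base pointer advanced by `index` elements (not bytes).
    let elem: *const u64 = unsafe { base.add(index) };
    assert_eq!(unsafe { *elem }, 30);

    // index(): distance from the base pointer, again measured in elements.
    let recovered = unsafe { elem.offset_from(base) };
    assert_eq!(recovered, index as isize);
}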
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{ + fmt::{Debug, Display}, + mem::size_of, +}; + +use rand::Rng; +use shared_memory::{Shmem, ShmemConf, ShmemError}; +use zenoh_result::{bail, zerror, ZResult}; + +const SEGMENT_DEDICATE_TRIES: usize = 100; +const ECMA: crc::Crc = crc::Crc::::new(&crc::CRC_64_ECMA_182); + +/// Segment of shared memory identified by an ID +pub struct Segment +where + rand::distributions::Standard: rand::distributions::Distribution, + ID: Clone + Display, +{ + shmem: Shmem, + id: ID, +} + +impl Debug for Segment +where + ID: Debug, + rand::distributions::Standard: rand::distributions::Distribution, + ID: Clone + Display, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Segment") + .field("shmem", &self.shmem.as_ptr()) + .field("id", &self.id) + .finish() + } +} + +impl Segment +where + rand::distributions::Standard: rand::distributions::Distribution, + ID: Clone + Display, +{ + // Automatically generate free id and create a new segment identified by this id + pub fn create(alloc_size: usize, id_prefix: &str) -> ZResult { + for _ in 0..SEGMENT_DEDICATE_TRIES { + // Generate random id + let id: ID = rand::thread_rng().gen(); + + // Try to create a new segment identified by prefix and generated id. + // If creation fails because segment already exists for this id, + // the creation attempt will be repeated with another id + match ShmemConf::new() + .size(alloc_size + size_of::()) + .os_id(Self::os_id(id.clone(), id_prefix)) + .create() + { + Ok(shmem) => { + tracing::debug!( + "Created SHM segment, size: {alloc_size}, prefix: {id_prefix}, id: {id}" + ); + unsafe { *(shmem.as_ptr() as *mut usize) = alloc_size }; + return Ok(Segment { shmem, id }); + } + Err(ShmemError::LinkExists) => {} + Err(ShmemError::MappingIdExists) => {} + Err(e) => bail!("Unable to create POSIX shm segment: {}", e), + } + } + bail!("Unable to dedicate POSIX shm segment file after {SEGMENT_DEDICATE_TRIES} tries!"); + } + + // Open an existing segment identified by id + pub fn open(id: ID, id_prefix: &str) -> ZResult { + let shmem = ShmemConf::new() + .os_id(Self::os_id(id.clone(), id_prefix)) + .open() + .map_err(|e| { + zerror!( + "Error opening POSIX shm segment id {id}, prefix: {id_prefix}: {}", + e + ) + })?; + + if shmem.len() <= size_of::() { + bail!("SHM segment too small") + } + + tracing::debug!("Opened SHM segment, prefix: {id_prefix}, id: {id}"); + + Ok(Self { shmem, id }) + } + + fn os_id(id: ID, id_prefix: &str) -> String { + let os_id_str = format!("{id_prefix}_{id}"); + let crc_os_id_str = ECMA.checksum(os_id_str.as_bytes()); + format!("{:x}", crc_os_id_str) + } + + pub fn as_ptr(&self) -> *mut u8 { + unsafe { self.shmem.as_ptr().add(size_of::()) } + } + + pub fn len(&self) -> usize { + unsafe { *(self.shmem.as_ptr() as *mut usize) } + } + + pub fn is_empty(&self) -> bool { + unsafe { *(self.shmem.as_ptr() as *mut usize) == 0 } + } + + pub fn id(&self) -> ID { + self.id.clone() + } +} diff --git a/commons/zenoh-shm/src/reader.rs b/commons/zenoh-shm/src/reader.rs new file mode 100644 index 0000000000..c2ce2303a9 --- /dev/null +++ b/commons/zenoh-shm/src/reader.rs @@ -0,0 +1,147 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is 
available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{collections::HashMap, ops::Deref, sync::Arc}; + +use zenoh_core::{bail, zerror}; +use zenoh_result::ZResult; + +use crate::{ + api::{ + client::shared_memory_segment::SharedMemorySegment, + client_storage::SharedMemoryClientStorage, + common::types::{ProtocolID, SegmentID}, + }, + header::subscription::GLOBAL_HEADER_SUBSCRIPTION, + watchdog::confirmator::GLOBAL_CONFIRMATOR, + SharedMemoryBuf, SharedMemoryBufInfo, +}; + +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct SharedMemoryReader { + client_storage: Arc, +} + +impl Deref for SharedMemoryReader { + type Target = SharedMemoryClientStorage; + + fn deref(&self) -> &Self::Target { + &self.client_storage + } +} + +impl SharedMemoryReader { + pub fn new(client_storage: Arc) -> Self { + Self { client_storage } + } + + pub fn read_shmbuf(&self, info: &SharedMemoryBufInfo) -> ZResult { + // Read does not increment the reference count as it is assumed + // that the sender of this buffer has incremented it for us. + + // attach to the watchdog before doing other things + let watchdog = Arc::new(GLOBAL_CONFIRMATOR.add(&info.watchdog_descriptor)?); + + let segment = self.ensure_segment(info)?; + let shmb = SharedMemoryBuf { + header: GLOBAL_HEADER_SUBSCRIPTION.link(&info.header_descriptor)?, + buf: segment.map(info.data_descriptor.chunk)?, + info: info.clone(), + watchdog, + }; + + // Validate buffer + match shmb.is_valid() { + true => Ok(shmb), + false => bail!("Buffer is invalidated"), + } + } + + fn ensure_segment(&self, info: &SharedMemoryBufInfo) -> ZResult> { + let id = GlobalDataSegmentID::new(info.shm_protocol, info.data_descriptor.segment); + + // fastest path: try to get access to already mounted SHM segment + // read lock allows concurrent execution of multiple requests + let r_guard = self.segments.read().unwrap(); + if let Some(val) = r_guard.get(&id) { + return Ok(val.clone()); + } + // fastest path failed: need to mount a new segment + + // drop read lock because we're gonna obtain write lock further + drop(r_guard); + + // find appropriate client + let client = self + .clients + .get_clients() + .get(&id.protocol) + .ok_or_else(|| zerror!("Unsupported SHM protocol: {}", id.protocol))?; + + // obtain write lock... 
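`ensure_segment` takes the read lock for the common already-mounted case, drops it, and re-checks under the write lock through the entry API so that two racing readers cannot mount the same segment twice. A generic sketch of that read-then-write double check over a plain `RwLock<HashMap>`, with toy types rather than the real segment and client machinery:

use std::collections::HashMap;
use std::sync::{Arc, RwLock};

// Generic "get or create" cache using the same read-then-write double check:
// cheap shared read path, re-validation under the exclusive write lock.
fn get_or_insert(cache: &RwLock<HashMap<u32, Arc<String>>>, id: u32) -> Arc<String> {
    // Fast path: shared read lock, many callers in parallel.
    if let Some(v) = cache.read().unwrap().get(&id) {
        return v.clone();
    }
    // Slow path: another thread may have won the race while we were unlocked,
    // so the entry API checks again before inserting.
    let mut guard = cache.write().unwrap();
    guard
        .entry(id)
        .or_insert_with(|| Arc::new(format!("segment-{id}")))
        .clone()
}

fn main() {
    let cache = RwLock::new(HashMap::new());
    let a = get_or_insert(&cache, 7);
    let b = get_or_insert(&cache, 7);
    assert!(Arc::ptr_eq(&a, &b)); // only one value was ever created for id 7
}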
+ let mut w_guard = self.segments.write().unwrap(); + + // many concurrent threads may be racing for mounting this particular segment, so we must check again if the segment exists + match w_guard.entry(id) { + // (rare case) segment already mounted + std::collections::hash_map::Entry::Occupied(occupied) => Ok(occupied.get().clone()), + + // (common case) mount a new segment and add it to the map + std::collections::hash_map::Entry::Vacant(vacant) => { + let new_segment = client.attach(info.data_descriptor.segment)?; + Ok(vacant.insert(new_segment).clone()) + } + } + } +} + +#[derive(Debug)] +pub(crate) struct ClientStorage +where + Inner: Sized, +{ + clients: HashMap, +} + +impl ClientStorage { + pub(crate) fn new(clients: HashMap) -> Self { + Self { clients } + } + + pub(crate) fn get_clients(&self) -> &HashMap { + &self.clients + } +} + +/// # Safety +/// Only immutable access to internal container is allowed, +/// so we are Send if the contained type is Send +unsafe impl Send for ClientStorage {} + +/// # Safety +/// Only immutable access to internal container is allowed, +/// so we are Sync if the contained type is Sync +unsafe impl Sync for ClientStorage {} + +#[derive(Debug, PartialEq, Eq, Hash)] +pub(crate) struct GlobalDataSegmentID { + protocol: ProtocolID, + segment: SegmentID, +} + +impl GlobalDataSegmentID { + fn new(protocol: ProtocolID, segment: SegmentID) -> Self { + Self { protocol, segment } + } +} diff --git a/commons/zenoh-shm/src/watchdog/allocated_watchdog.rs b/commons/zenoh-shm/src/watchdog/allocated_watchdog.rs new file mode 100644 index 0000000000..45917d5bdc --- /dev/null +++ b/commons/zenoh-shm/src/watchdog/allocated_watchdog.rs @@ -0,0 +1,35 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use super::{descriptor::OwnedDescriptor, storage::GLOBAL_STORAGE, validator::GLOBAL_VALIDATOR}; + +#[derive(Debug)] +pub struct AllocatedWatchdog { + pub descriptor: OwnedDescriptor, +} + +impl AllocatedWatchdog { + pub(crate) fn new(descriptor: OwnedDescriptor) -> Self { + // reset descriptor on allocation + descriptor.validate(); + Self { descriptor } + } +} + +impl Drop for AllocatedWatchdog { + fn drop(&mut self) { + GLOBAL_VALIDATOR.remove(self.descriptor.clone()); + GLOBAL_STORAGE.free_watchdog(self.descriptor.clone()); + } +} diff --git a/commons/zenoh-shm/src/watchdog/confirmator.rs b/commons/zenoh-shm/src/watchdog/confirmator.rs new file mode 100644 index 0000000000..54c2d233dc --- /dev/null +++ b/commons/zenoh-shm/src/watchdog/confirmator.rs @@ -0,0 +1,192 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
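`AllocatedWatchdog` relies on RAII: dropping it removes the descriptor from the validator and returns it to the global storage, so callers cannot leak a watchdog. A toy sketch of the same drop-returns-to-pool pattern, over a pool of plain `usize` slots rather than the real storage:

use std::sync::Mutex;

struct Pool {
    free: Mutex<Vec<usize>>,
}

struct Allocated<'a> {
    pool: &'a Pool,
    slot: usize,
}

impl Pool {
    fn allocate(&self) -> Option<Allocated<'_>> {
        let slot = self.free.lock().unwrap().pop()?;
        Some(Allocated { pool: self, slot })
    }
}

impl Drop for Allocated<'_> {
    fn drop(&mut self) {
        // Counterpart of GLOBAL_STORAGE.free_watchdog(...) in the real code.
        self.pool.free.lock().unwrap().push(self.slot);
    }
}

fn main() {
    let pool = Pool { free: Mutex::new(vec![0, 1, 2]) };
    {
        let a = pool.allocate().unwrap();
        assert_eq!(pool.free.lock().unwrap().len(), 2);
        let _ = a.slot;
    } // `a` dropped here: the slot goes back to the pool
    assert_eq!(pool.free.lock().unwrap().len(), 3);
}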
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{ + collections::BTreeMap, + sync::{Arc, RwLock}, + time::Duration, +}; + +use lazy_static::lazy_static; +use zenoh_result::{zerror, ZResult}; + +use super::{ + periodic_task::PeriodicTask, + descriptor::{Descriptor, OwnedDescriptor, SegmentID}, + segment::Segment, +}; + +lazy_static! { + pub static ref GLOBAL_CONFIRMATOR: WatchdogConfirmator = + WatchdogConfirmator::new(Duration::from_millis(50)); +} + +pub struct ConfirmedDescriptor { + pub owned: OwnedDescriptor, + confirmed: Arc, +} + +impl Drop for ConfirmedDescriptor { + fn drop(&mut self) { + self.confirmed.remove(self.owned.clone()); + } +} + +impl ConfirmedDescriptor { + fn new(owned: OwnedDescriptor, confirmed: Arc) -> Self { + owned.confirm(); + confirmed.add(owned.clone()); + Self { owned, confirmed } + } +} + +#[derive(PartialEq)] +enum Transaction { + Add, + Remove, +} + +struct ConfirmedSegment { + segment: Arc, + transactions: lockfree::queue::Queue<(Transaction, OwnedDescriptor)>, +} + +impl ConfirmedSegment { + fn new(segment: Arc) -> Self { + Self { + segment, + transactions: lockfree::queue::Queue::default(), + } + } + + fn add(&self, descriptor: OwnedDescriptor) { + self.transactions.push((Transaction::Add, descriptor)); + } + + fn remove(&self, descriptor: OwnedDescriptor) { + self.transactions.push((Transaction::Remove, descriptor)); + } + + fn collect_transactions(&self, watchdogs: &mut BTreeMap) { + while let Some((transaction, descriptor)) = self.transactions.pop() { + // collect transactions + match watchdogs.entry(descriptor) { + std::collections::btree_map::Entry::Vacant(vacant) => { + #[cfg(feature = "test")] + assert!(transaction == Transaction::Add); + vacant.insert(1); + } + std::collections::btree_map::Entry::Occupied(mut occupied) => match transaction { + Transaction::Add => { + *occupied.get_mut() += 1; + } + Transaction::Remove => { + if *occupied.get() == 1 { + occupied.remove(); + } else { + *occupied.get_mut() -= 1; + } + } + }, + } + } + } +} +unsafe impl Send for ConfirmedSegment {} +unsafe impl Sync for ConfirmedSegment {} + +// TODO: optimize confirmation by packing descriptors AND linked table together +// TODO: think about linked table cleanup +pub struct WatchdogConfirmator { + confirmed: RwLock>>, + segment_transactions: Arc>>, + _task: PeriodicTask, +} + +impl WatchdogConfirmator { + fn new(interval: Duration) -> Self { + let segment_transactions = Arc::>>::default(); + + let c_segment_transactions = segment_transactions.clone(); + let mut segments: Vec<(Arc, BTreeMap)> = vec![]; + let task = PeriodicTask::new("Watchdog Confirmator".to_owned(), interval, move || { + // add new segments + while let Some(new_segment) = c_segment_transactions.as_ref().pop() { + segments.push((new_segment, BTreeMap::default())); + } + + // collect all existing transactions + for (segment, watchdogs) in &mut segments { + segment.collect_transactions(watchdogs); + } + + // confirm all tracked watchdogs + for (_, watchdogs) in &segments { + for watchdog in watchdogs { + watchdog.0.confirm(); + } + } + }); + + Self { + confirmed: RwLock::default(), + segment_transactions, + _task: task, + } + } + + pub fn add_owned(&self, descriptor: &OwnedDescriptor) -> ZResult { + self.add(&Descriptor::from(descriptor)) + } + + pub fn add(&self, descriptor: &Descriptor) -> ZResult { + let guard = self.confirmed.read().map_err(|e| zerror!("{e}"))?; + if let Some(segment) = guard.get(&descriptor.id) { + return 
self.link(descriptor, segment); + } + drop(guard); + + let segment = Arc::new(Segment::open(descriptor.id)?); + let confirmed_segment = Arc::new(ConfirmedSegment::new(segment)); + let confirmed_descriptoir = self.link(descriptor, &confirmed_segment); + + let mut guard = self.confirmed.write().map_err(|e| zerror!("{e}"))?; + match guard.entry(descriptor.id) { + std::collections::btree_map::Entry::Vacant(vacant) => { + vacant.insert(confirmed_segment.clone()); + self.segment_transactions.push(confirmed_segment); + confirmed_descriptoir + } + std::collections::btree_map::Entry::Occupied(occupied) => { + self.link(descriptor, occupied.get()) + } + } + } + + fn link( + &self, + descriptor: &Descriptor, + segment: &Arc, + ) -> ZResult { + let index = descriptor.index_and_bitpos >> 6; + let bitpos = descriptor.index_and_bitpos & 0x3f; + + let atomic = unsafe { segment.segment.array.elem(index) }; + let mask = 1u64 << bitpos; + + let owned = OwnedDescriptor::new(segment.segment.clone(), atomic, mask); + let confirmed = ConfirmedDescriptor::new(owned, segment.clone()); + Ok(confirmed) + } +} diff --git a/commons/zenoh-shm/src/watchdog/descriptor.rs b/commons/zenoh-shm/src/watchdog/descriptor.rs new file mode 100644 index 0000000000..38fddd61e8 --- /dev/null +++ b/commons/zenoh-shm/src/watchdog/descriptor.rs @@ -0,0 +1,116 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{ + hash::Hash, + sync::{atomic::AtomicU64, Arc}, +}; + +use super::segment::Segment; + +pub type SegmentID = u32; + +#[derive(Clone, Eq, Hash, PartialEq, PartialOrd, Ord, Debug)] +pub struct Descriptor { + pub id: SegmentID, + pub index_and_bitpos: u32, +} + +impl From<&OwnedDescriptor> for Descriptor { + fn from(item: &OwnedDescriptor) -> Self { + let bitpos = { + // TODO: can be optimized + let mut v = item.mask; + let mut bitpos = 0u32; + while v > 1 { + bitpos += 1; + v >>= 1; + } + bitpos + }; + let index = unsafe { item.segment.array.index(item.atomic) }; + let index_and_bitpos = (index << 6) | bitpos; + Descriptor { + id: item.segment.array.id(), + index_and_bitpos, + } + } +} + +#[derive(Clone, Debug)] +pub struct OwnedDescriptor { + segment: Arc, + pub atomic: *const AtomicU64, + pub mask: u64, +} + +unsafe impl Send for OwnedDescriptor {} +unsafe impl Sync for OwnedDescriptor {} + +impl Hash for OwnedDescriptor { + fn hash(&self, state: &mut H) { + self.atomic.hash(state); + self.mask.hash(state); + } +} + +impl OwnedDescriptor { + pub(crate) fn new(segment: Arc, atomic: *const AtomicU64, mask: u64) -> Self { + Self { + segment, + atomic, + mask, + } + } + + pub fn confirm(&self) { + unsafe { + (*self.atomic).fetch_or(self.mask, std::sync::atomic::Ordering::SeqCst); + }; + } + + pub(crate) fn validate(&self) -> u64 { + unsafe { + (*self.atomic).fetch_and(!self.mask, std::sync::atomic::Ordering::SeqCst) & self.mask + } + } + + #[cfg(feature = "test")] + pub fn test_validate(&self) -> u64 { + self.validate() + } +} + +impl Ord for OwnedDescriptor { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + match self.atomic.cmp(&other.atomic) { + core::cmp::Ordering::Equal => {} + ord => return ord, + } + 
self.mask.cmp(&other.mask) + } +} + +impl PartialOrd for OwnedDescriptor { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl PartialEq for OwnedDescriptor { + fn eq(&self, other: &Self) -> bool { + self.atomic == other.atomic && self.mask == other.mask + } +} +impl Eq for OwnedDescriptor {} diff --git a/commons/zenoh-shm/src/watchdog/mod.rs b/commons/zenoh-shm/src/watchdog/mod.rs new file mode 100644 index 0000000000..55267a5442 --- /dev/null +++ b/commons/zenoh-shm/src/watchdog/mod.rs @@ -0,0 +1,24 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub mod descriptor; + +tested_crate_module!(periodic_task); +tested_crate_module!(storage); +tested_crate_module!(validator); +tested_crate_module!(confirmator); + +pub(crate) mod allocated_watchdog; + +mod segment; diff --git a/commons/zenoh-shm/src/watchdog/periodic_task.rs b/commons/zenoh-shm/src/watchdog/periodic_task.rs new file mode 100644 index 0000000000..98cf8fbba7 --- /dev/null +++ b/commons/zenoh-shm/src/watchdog/periodic_task.rs @@ -0,0 +1,100 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
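`Descriptor::from` and `WatchdogConfirmator::link` above agree on one packing: the low 6 bits of `index_and_bitpos` select a bit inside one `AtomicU64`, and the remaining bits select the array element. A small round-trip sketch of that arithmetic:

// Pack/unpack round trip for the watchdog descriptor encoding.
fn pack(index: u32, bitpos: u32) -> u32 {
    debug_assert!(bitpos < 64);
    (index << 6) | bitpos
}

fn unpack(index_and_bitpos: u32) -> (u32, u64) {
    let index = index_and_bitpos >> 6;
    let bitpos = index_and_bitpos & 0x3f;
    let mask = 1u64 << bitpos; // the single bit owned by this watchdog
    (index, mask)
}

fn main() {
    let packed = pack(5, 17);
    let (index, mask) = unpack(packed);
    assert_eq!(index, 5);
    assert_eq!(mask, 1u64 << 17);
}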
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{ + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + time::Duration, +}; + +use thread_priority::ThreadBuilder; +#[cfg(unix)] +use thread_priority::{ + set_current_thread_priority, RealtimeThreadSchedulePolicy, ThreadPriority, ThreadPriorityValue, ThreadSchedulePolicy::Realtime +}; + +pub struct PeriodicTask { + running: Arc, +} + +impl Drop for PeriodicTask { + fn drop(&mut self) { + self.running.store(false, Ordering::Relaxed) + } +} + +impl PeriodicTask { + pub fn new(name: String, interval: Duration, mut f: F) -> Self + where + F: FnMut() + Send + 'static, + { + let running = Arc::new(AtomicBool::new(true)); + + let c_running = running.clone(); + + #[cfg(unix)] + let builder = ThreadBuilder::default() + .name(name) + .policy(Realtime(RealtimeThreadSchedulePolicy::Fifo)) + .priority(ThreadPriority::Min); + + // TODO: deal with windows realtime scheduling + #[cfg(windows)] + let builder = ThreadBuilder::default().name(name); + + let _ = builder.spawn(move |result| { + if let Err(e) = result { + #[cfg(windows)] + tracing::warn!("{:?}: error setting scheduling priority for thread: {:?}, will run with the default one...", std::thread::current().name(), e); + #[cfg(unix)] + { + tracing::warn!("{:?}: error setting realtime FIFO scheduling policy for thread: {:?}, will run with the default one...", std::thread::current().name(), e); + for priotity in (ThreadPriorityValue::MIN..ThreadPriorityValue::MAX).rev() { + if let Ok(p) = priotity.try_into() { + if set_current_thread_priority(ThreadPriority::Crossplatform(p)).is_ok() { + tracing::warn!("{:?}: will use priority {}", std::thread::current().name(), priotity); + break; + } + } + } + } + } + + //TODO: need mlock here! + + while c_running.load(Ordering::Relaxed) { + let cycle_start = std::time::Instant::now(); + + f(); + + // sleep for next iteration + let elapsed = cycle_start.elapsed(); + if elapsed < interval { + let sleep_interval = interval - elapsed; + std::thread::sleep(sleep_interval); + } else { + let err = format!("{:?}: timer overrun", std::thread::current().name()); + #[cfg(not(feature = "test"))] + tracing::error!("{err}"); + #[cfg(feature = "test")] + panic!("{err}"); + } + } + }); + + Self { running } + } +} diff --git a/commons/zenoh-shm/src/watchdog/segment.rs b/commons/zenoh-shm/src/watchdog/segment.rs new file mode 100644 index 0000000000..b4a273c01c --- /dev/null +++ b/commons/zenoh-shm/src/watchdog/segment.rs @@ -0,0 +1,41 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
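`PeriodicTask` keeps a fixed period by sleeping only for whatever is left of the interval after the payload ran, and reports an overrun when the payload alone exceeds the period. The scheduling arithmetic in isolation, as a plain function instead of the spawned thread:

use std::time::{Duration, Instant};

// Each cycle sleeps only for the remainder of the period, so the payload's
// own run time does not stretch the period (until it overruns it).
fn run_n_cycles(interval: Duration, cycles: usize, mut payload: impl FnMut()) {
    for _ in 0..cycles {
        let cycle_start = Instant::now();
        payload();
        let elapsed = cycle_start.elapsed();
        if elapsed < interval {
            std::thread::sleep(interval - elapsed);
        } else {
            eprintln!("timer overrun: payload took {elapsed:?} (period {interval:?})");
        }
    }
}

fn main() {
    let t0 = Instant::now();
    run_n_cycles(Duration::from_millis(20), 5, || std::thread::sleep(Duration::from_millis(5)));
    // Roughly 5 * 20 ms regardless of the 5 ms payload (modulo OS sleep jitter).
    assert!(t0.elapsed() >= Duration::from_millis(100));
}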
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::sync::atomic::AtomicU64; + +use zenoh_result::ZResult; + +use crate::posix_shm::array::ArrayInSHM; + +use super::descriptor::SegmentID; + +const WATCHDOG_SEGMENT_PREFIX: &str = "watchdog"; + +#[derive(Debug)] +pub struct Segment { + pub array: ArrayInSHM, +} + +impl Segment { + pub fn create(watchdog_count: usize) -> ZResult { + let elem_count = (watchdog_count + 63) / 64; + let array = ArrayInSHM::create(elem_count, WATCHDOG_SEGMENT_PREFIX)?; + Ok(Self { array }) + } + + pub fn open(id: SegmentID) -> ZResult { + let array = ArrayInSHM::open(id, WATCHDOG_SEGMENT_PREFIX)?; + Ok(Self { array }) + } +} diff --git a/commons/zenoh-shm/src/watchdog/storage.rs b/commons/zenoh-shm/src/watchdog/storage.rs new file mode 100644 index 0000000000..5744a273a0 --- /dev/null +++ b/commons/zenoh-shm/src/watchdog/storage.rs @@ -0,0 +1,76 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use lazy_static::lazy_static; +use std::{ + collections::BTreeSet, + sync::{Arc, Mutex}, +}; + +use zenoh_result::{zerror, ZResult}; + +use super::{allocated_watchdog::AllocatedWatchdog, descriptor::OwnedDescriptor, segment::Segment}; + +lazy_static! { + pub static ref GLOBAL_STORAGE: WatchdogStorage = WatchdogStorage::new(32768usize).unwrap(); +} + +pub struct WatchdogStorage { + available: Arc>>, +} + +// TODO: expand and shrink Storage when needed +// OR +// support multiple descriptor assignment (allow multiple buffers to be assigned to the same watchdog) +impl WatchdogStorage { + pub fn new(initial_watchdog_count: usize) -> ZResult { + let segment = Arc::new(Segment::create(initial_watchdog_count)?); + + let mut initially_available = BTreeSet::default(); + let subsegments = segment.array.elem_count(); + for subsegment in 0..subsegments { + let atomic = unsafe { segment.array.elem(subsegment as u32) }; + + for bit in 0..64 { + let mask = 1u64 << bit; + let descriptor = OwnedDescriptor::new(segment.clone(), atomic, mask); + let _new_insert = initially_available.insert(descriptor); + #[cfg(feature = "test")] + assert!(_new_insert); + } + } + + Ok(Self { + available: Arc::new(Mutex::new(initially_available)), + }) + } + + pub fn allocate_watchdog(&self) -> ZResult { + let mut guard = self.available.lock().map_err(|e| zerror!("{e}"))?; + let popped = guard.pop_first(); + drop(guard); + + let allocated = + AllocatedWatchdog::new(popped.ok_or_else(|| zerror!("no free watchdogs available"))?); + + Ok(allocated) + } + + pub(crate) fn free_watchdog(&self, descriptor: OwnedDescriptor) { + if let Ok(mut guard) = self.available.lock() { + let _new_insert = guard.insert(descriptor); + #[cfg(feature = "test")] + assert!(_new_insert); + } + } +} diff --git a/commons/zenoh-shm/src/watchdog/validator.rs b/commons/zenoh-shm/src/watchdog/validator.rs new file mode 100644 index 0000000000..d28dfa8e3c --- /dev/null +++ b/commons/zenoh-shm/src/watchdog/validator.rs @@ -0,0 +1,102 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the 
Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{collections::BTreeMap, sync::Arc, time::Duration}; + +use lazy_static::lazy_static; + +use super::{descriptor::OwnedDescriptor, periodic_task::PeriodicTask}; + +pub(super) type InvalidateCallback = Box; + +lazy_static! { + pub static ref GLOBAL_VALIDATOR: WatchdogValidator = + WatchdogValidator::new(Duration::from_millis(100)); +} + +enum Transaction { + Add(InvalidateCallback), + Remove, +} + +#[derive(Default)] +struct ValidatedStorage { + transactions: lockfree::queue::Queue<(Transaction, OwnedDescriptor)>, +} + +impl ValidatedStorage { + fn add(&self, descriptor: OwnedDescriptor, on_invalidated: InvalidateCallback) { + self.transactions + .push((Transaction::Add(on_invalidated), descriptor)); + } + + fn remove(&self, descriptor: OwnedDescriptor) { + self.transactions.push((Transaction::Remove, descriptor)); + } + + fn collect_transactions(&self, storage: &mut BTreeMap) { + while let Some((transaction, descriptor)) = self.transactions.pop() { + match transaction { + Transaction::Add(on_invalidated) => { + let _old = storage.insert(descriptor, on_invalidated); + #[cfg(feature = "test")] + assert!(_old.is_none()); + } + Transaction::Remove => { + let _ = storage.remove(&descriptor); + } + } + } + } +} + +// TODO: optimize validation by packing descriptors +pub struct WatchdogValidator { + storage: Arc, + _task: PeriodicTask, +} + +impl WatchdogValidator { + pub fn new(interval: Duration) -> Self { + let storage = Arc::new(ValidatedStorage::default()); + + let c_storage = storage.clone(); + let mut watchdogs = BTreeMap::default(); + let task = PeriodicTask::new("Watchdog Validator".to_owned(), interval, move || { + c_storage.collect_transactions(&mut watchdogs); + + watchdogs.retain(|watchdog, on_invalidated| { + let old_val = watchdog.validate(); + if old_val == 0 { + on_invalidated(); + return false; + } + true + }); + }); + + Self { + storage, + _task: task, + } + } + + pub fn add(&self, watchdog: OwnedDescriptor, on_invalidated: InvalidateCallback) { + self.storage.add(watchdog, on_invalidated); + } + + pub fn remove(&self, watchdog: OwnedDescriptor) { + self.storage.remove(watchdog); + } +} diff --git a/commons/zenoh-shm/tests/common/mod.rs b/commons/zenoh-shm/tests/common/mod.rs new file mode 100644 index 0000000000..a97773f686 --- /dev/null +++ b/commons/zenoh-shm/tests/common/mod.rs @@ -0,0 +1,105 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
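The validator's `retain` above is one half of a two-sided handshake: the owner periodically sets its bit with `fetch_or` (confirm), the validator clears it with `fetch_and` and inspects the previous value (validate); a zero result means nobody confirmed since the last cycle. The handshake on a single `AtomicU64`, mirroring `OwnedDescriptor::confirm`/`validate`:

use std::sync::atomic::{AtomicU64, Ordering::SeqCst};

fn confirm(word: &AtomicU64, mask: u64) {
    word.fetch_or(mask, SeqCst);
}

fn validate(word: &AtomicU64, mask: u64) -> u64 {
    // Atomically clear the bit and report whether it was set before.
    word.fetch_and(!mask, SeqCst) & mask
}

fn main() {
    let word = AtomicU64::new(0);
    let mask = 1u64 << 17;

    confirm(&word, mask);
    assert_ne!(validate(&word, mask), 0); // confirmed since the last check
    assert_eq!(validate(&word, mask), 0); // no new confirmation: considered dead
}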
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{ + sync::{atomic::AtomicBool, Arc}, + thread::JoinHandle, +}; + +use zenoh_result::ZResult; + +pub const TEST_SEGMENT_PREFIX: &str = "test"; + +pub fn validate_memory(mem1: &mut [u8], mem2: &[u8]) { + assert!(mem1.len() == mem2.len()); + for cycle in 0..255u8 { + // sequentially fill segment1 with values checking segment2 having these changes + for i in 0..mem1.len() { + mem1[i] = cycle; + assert!(mem2[i] == cycle); + } + + // check the whole segment2 having proper values + for i in mem2 { + assert!(*i == cycle); + } + } +} + +pub fn execute_concurrent(concurrent_tasks: usize, iterations: usize, task_fun: TaskFun) +where + TaskFun: Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static, +{ + let mut tasks = vec![]; + for task_index in 0..concurrent_tasks { + let c_task_fun = task_fun.clone(); + let task_handle = std::thread::spawn(move || { + for iteration in 0..iterations { + if let Err(e) = c_task_fun(task_index, iteration) { + panic!("task {task_index}: iteration {iteration}: {e}") + } + } + }); + tasks.push(task_handle); + } + for task in tasks { + task.join().expect("Error joining thread!"); + } +} + +pub fn load_fn( + working: Arc, +) -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { + move |_task_index: usize, _iteration: usize| -> ZResult<()> { + while working.load(std::sync::atomic::Ordering::SeqCst) {} + Ok(()) + } +} + +pub struct CpuLoad { + handle: Option>, + flag: Arc, +} + +impl Drop for CpuLoad { + fn drop(&mut self) { + self.flag.store(false, std::sync::atomic::Ordering::SeqCst); + let _ = self.handle.take().unwrap().join(); + } +} + +impl CpuLoad { + pub fn exessive() -> Self { + Self::new(1000) + } + + pub fn optimal_high() -> Self { + Self::new(num_cpus::get()) + } + + pub fn low() -> Self { + Self::new(1) + } + + fn new(thread_count: usize) -> Self { + let flag = Arc::new(AtomicBool::new(true)); + + let c_flag = flag.clone(); + let handle = Some(std::thread::spawn(move || { + execute_concurrent(thread_count, 1, load_fn(c_flag)); + })); + + Self { handle, flag } + } +} diff --git a/commons/zenoh-shm/tests/header.rs b/commons/zenoh-shm/tests/header.rs new file mode 100644 index 0000000000..a734abf108 --- /dev/null +++ b/commons/zenoh-shm/tests/header.rs @@ -0,0 +1,130 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::sync::atomic::Ordering::Relaxed; + +use rand::Rng; +use zenoh_result::ZResult; +use zenoh_shm::header::{ + descriptor::HeaderDescriptor, storage::GLOBAL_HEADER_STORAGE, + subscription::GLOBAL_HEADER_SUBSCRIPTION, +}; + +pub mod common; +use common::execute_concurrent; + +fn header_alloc_fn() -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { + |_task_index: usize, _iteration: usize| -> ZResult<()> { + let _allocated_header = GLOBAL_HEADER_STORAGE.allocate_header()?; + Ok(()) + } +} + +#[test] +fn header_alloc() { + execute_concurrent(1, 1000, header_alloc_fn()); +} + +#[test] +fn header_alloc_concurrent() { + execute_concurrent(100, 1000, header_alloc_fn()); +} + +fn header_link_fn() -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { + |_task_index: usize, _iteration: usize| { + let allocated_header = GLOBAL_HEADER_STORAGE.allocate_header()?; + let descr = HeaderDescriptor::from(&allocated_header.descriptor); + let _linked_header = GLOBAL_HEADER_SUBSCRIPTION.link(&descr)?; + Ok(()) + } +} + +#[test] +fn header_link() { + execute_concurrent(1, 1000, header_link_fn()); +} + +#[test] +fn header_link_concurrent() { + execute_concurrent(100, 1000, header_link_fn()); +} + +fn header_link_failure_fn() -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static +{ + |_task_index: usize, _iteration: usize| { + let allocated_header = GLOBAL_HEADER_STORAGE.allocate_header()?; + let descr = HeaderDescriptor::from(&allocated_header.descriptor); + drop(allocated_header); + + // Some comments on this behaviour... + // Even though the allocated_header is dropped, it's SHM segment still exists in GLOBAL_HEADER_STORAGE, + // so there is no way to detect that header is "deallocated" and the code below succeeds. The invalidation + // funcionality is implemented on higher level by means of generation mechanism and protects from both header + // and watchdog link-to-deallocated issues. 
This generation mechanism depends on the behaviour below, so + // everything is fair :) + let _linked_header = GLOBAL_HEADER_SUBSCRIPTION.link(&descr)?; + Ok(()) + } +} + +#[test] +fn header_link_failure() { + execute_concurrent(1, 1000, header_link_failure_fn()); +} + +#[test] +fn header_link_failure_concurrent() { + execute_concurrent(100, 1000, header_link_failure_fn()); +} + +fn header_check_memory_fn(parallel_tasks: usize, iterations: usize) { + let task_fun = |_task_index: usize, _iteration: usize| -> ZResult<()> { + let allocated_header = GLOBAL_HEADER_STORAGE.allocate_header()?; + let descr = HeaderDescriptor::from(&allocated_header.descriptor); + let linked_header = GLOBAL_HEADER_SUBSCRIPTION.link(&descr)?; + + let mut rng = rand::thread_rng(); + let allocated = allocated_header.descriptor.header(); + let linked = linked_header.header(); + for _ in 0..100 { + let gen = rng.gen(); + allocated.generation.store(gen, Relaxed); + assert_eq!(gen, linked.generation.load(Relaxed)); + + let rc = rng.gen(); + allocated.refcount.store(rc, Relaxed); + assert_eq!(rc, linked.refcount.load(Relaxed)); + + let watchdog_inv = rng.gen(); + allocated.watchdog_invalidated.store(watchdog_inv, Relaxed); + assert_eq!(watchdog_inv, linked.watchdog_invalidated.load(Relaxed)); + + assert_eq!(gen, linked.generation.load(Relaxed)); + assert_eq!(rc, linked.refcount.load(Relaxed)); + assert_eq!(watchdog_inv, linked.watchdog_invalidated.load(Relaxed)); + } + Ok(()) + }; + execute_concurrent(parallel_tasks, iterations, task_fun); +} + +#[test] +fn header_check_memory() { + header_check_memory_fn(1, 1000); +} + +#[test] +fn header_check_memory_concurrent() { + header_check_memory_fn(100, 100); +} diff --git a/commons/zenoh-shm/tests/periodic_task.rs b/commons/zenoh-shm/tests/periodic_task.rs new file mode 100644 index 0000000000..dcfd560d7d --- /dev/null +++ b/commons/zenoh-shm/tests/periodic_task.rs @@ -0,0 +1,172 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
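The comment above only hints at the generation mechanism, so the following is one plausible shape of such a check rather than the crate's actual implementation: a buffer remembers the header generation it was created against, and reusing the header slot bumps the generation, which makes stale buffers detectably invalid.

use std::sync::atomic::{AtomicU32, Ordering::SeqCst};

struct HeaderSlot {
    generation: AtomicU32,
}

struct Buf<'a> {
    header: &'a HeaderSlot,
    generation: u32, // generation observed when the buffer was created
}

impl Buf<'_> {
    fn is_valid(&self) -> bool {
        self.header.generation.load(SeqCst) == self.generation
    }
}

fn main() {
    let slot = HeaderSlot { generation: AtomicU32::new(7) };
    let buf = Buf { header: &slot, generation: slot.generation.load(SeqCst) };
    assert!(buf.is_valid());

    // The slot is reused for a new allocation: the generation is bumped.
    slot.generation.fetch_add(1, SeqCst);
    assert!(!buf.is_valid()); // the stale buffer is now detectably invalid
}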
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{ + sync::{Arc, Mutex}, + time::{Duration, Instant}, +}; + +use zenoh_shm::watchdog::periodic_task::PeriodicTask; + +pub mod common; +use common::CpuLoad; + +const TASK_PERIOD: Duration = Duration::from_millis(50); +const TASK_DELTA: Duration = Duration::from_millis(5); +const TEST_TASK: Duration = Duration::from_millis(10); + +fn intensive_payload(duration: Duration) -> impl Fn() + Send + 'static { + move || { + let start = Instant::now(); + while start.elapsed() < duration { + for _i in 0..100 {} + } + } +} + +fn blocking_payload(duration: Duration) -> impl Fn() + Send + 'static { + move || { + std::thread::sleep(duration); + } +} + +fn check_duration(duration: &Duration) { + let min = TASK_PERIOD - TASK_DELTA; + let max = TASK_PERIOD + TASK_DELTA; + + assert!(min <= *duration && *duration <= max); +} + +fn make_task(task_payload: F) -> (PeriodicTask, Arc>>) +where + F: Fn() + Send + 'static, +{ + let intervals = Arc::new(Mutex::new(vec![])); + + let c_intervals = intervals.clone(); + let mut start: Option = None; + let task = PeriodicTask::new("test".to_owned(), TASK_PERIOD, move || { + if let Some(val) = &start { + let elapsed = val.elapsed(); + c_intervals.lock().unwrap().push(elapsed); + } + start = Some(Instant::now()); + task_payload(); + }); + + (task, intervals) +} + +#[test] +#[ignore] +fn periodic_task_create() { + let (_task, _intervals) = make_task(|| {}); +} + +fn check_task(task_payload: F) +where + F: Fn() + Send + 'static, +{ + let n = 100; + let (task, intervals) = make_task(task_payload); + + std::thread::sleep(TASK_PERIOD * n); + drop(task); + + let guard = intervals.lock().unwrap(); + for duration in &*guard { + check_duration(duration); + } +} + +#[test] +#[ignore] +fn periodic_task_lightweight() { + check_task(|| {}); +} + +#[test] +#[ignore] +fn periodic_task_blocking() { + check_task(blocking_payload(TEST_TASK)); +} + +#[test] +#[ignore] +fn periodic_task_intensive() { + check_task(intensive_payload(TEST_TASK)); +} + +#[test] +#[ignore] +fn periodic_task_low_load_lightweight() { + let _load = CpuLoad::low(); + check_task(|| {}); +} + +#[test] +#[ignore] +fn periodic_task_low_load_blocking() { + let _load = CpuLoad::low(); + check_task(blocking_payload(TEST_TASK)); +} + +#[test] +#[ignore] +fn periodic_task_low_load_intensive() { + let _load = CpuLoad::low(); + check_task(intensive_payload(TEST_TASK)); +} + +#[test] +#[ignore] +fn periodic_task_optimal_high_load_lightweight() { + let _load = CpuLoad::optimal_high(); + check_task(|| {}); +} + +#[test] +#[ignore] +fn periodic_task_optimal_high_load_blocking() { + let _load = CpuLoad::optimal_high(); + check_task(blocking_payload(TEST_TASK)); +} + +#[test] +#[ignore] +fn periodic_task_optimal_high_load_intensive() { + let _load = CpuLoad::optimal_high(); + check_task(intensive_payload(TEST_TASK)); +} + +#[test] +#[ignore] +fn periodic_task_exessive_load_lightweight() { + let _load = CpuLoad::exessive(); + check_task(|| {}); +} + +#[test] +#[ignore] +fn periodic_task_exessive_load_blocking() { + let _load = CpuLoad::exessive(); + check_task(blocking_payload(TEST_TASK)); +} + +#[test] +#[ignore] +fn periodic_task_exessive_load_intensive() { + let _load = CpuLoad::exessive(); + check_task(intensive_payload(TEST_TASK)); +} diff --git a/commons/zenoh-shm/tests/posix_array.rs b/commons/zenoh-shm/tests/posix_array.rs new file mode 100644 index 0000000000..562102ea17 --- /dev/null +++ 
b/commons/zenoh-shm/tests/posix_array.rs @@ -0,0 +1,161 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{fmt::Debug, mem::size_of}; + +use num_traits::{AsPrimitive, PrimInt, Unsigned}; +use zenoh_shm::posix_shm::array::ArrayInSHM; + +pub mod common; +use common::TEST_SEGMENT_PREFIX; + +type TestSegmentID = u32; + +#[derive(Debug)] +#[stabby::stabby] +struct TestElem { + value: u32, +} + +impl TestElem { + fn fill(&mut self, counter: &mut u32) { + self.value = *counter; + *counter += 1; + } + + fn validate(&self, counter: &mut u32) { + assert_eq!(self.value, *counter); + *counter += 1; + } +} + +fn validate_array( + array1: &mut ArrayInSHM, + array2: &ArrayInSHM, + expected_elem_count: usize, +) where + ElemIndex: Unsigned + PrimInt + 'static + AsPrimitive, + isize: AsPrimitive, + usize: AsPrimitive, +{ + assert!(array1.elem_count() == expected_elem_count); + assert!(array2.elem_count() == expected_elem_count); + + let mut fill_ctr = 0; + let mut validate_ctr = 0; + + // first of all, fill and validate elements sequentially + for i in 0..array1.elem_count() { + unsafe { + let elem1 = &mut *array1.elem_mut(i.as_()); + let elem2 = &*array2.elem(i.as_()); + + elem1.fill(&mut fill_ctr); + elem2.validate(&mut validate_ctr); + } + } + + // then fill all the elements... + for i in 0..array1.elem_count() { + unsafe { + let elem1 = &mut *array1.elem_mut(i.as_()); + elem1.fill(&mut fill_ctr); + } + } + + // ...and validate all the elements + for i in 0..array2.elem_count() { + unsafe { + let elem2 = &*array2.elem(i.as_()); + elem2.validate(&mut validate_ctr); + } + } +} + +fn test_array() +where + ElemIndex: Unsigned + PrimInt + 'static + AsPrimitive, + isize: AsPrimitive, + usize: AsPrimitive, +{ + // Estimate elem count to test + // NOTE: for index sizes <= 16 bit we use the whole index range to test, + // and for bigger indexes we use limited index range + let elem_count = { + match size_of::() > size_of::() { + true => 100, + false => ElemIndex::max_value().as_() + 1, + } + }; + + let mut new_arr: ArrayInSHM = + ArrayInSHM::create(elem_count, TEST_SEGMENT_PREFIX).expect("error creating new array!"); + + let opened_arr: ArrayInSHM<_, TestElem, ElemIndex> = + ArrayInSHM::open(new_arr.id(), TEST_SEGMENT_PREFIX).expect("error opening existing array!"); + + validate_array(&mut new_arr, &opened_arr, elem_count); +} + +/// MEMORY CHECKS /// + +#[test] +fn arr_u8_index_memory_test() { + test_array::(); +} + +#[test] +fn arr_u16_index_memory_test() { + test_array::(); +} + +#[test] +fn arr_u32_index_memory_test() { + test_array::(); +} + +/// ELEM COUNT CHECKS /// + +fn test_invalid_elem_index() +where + ElemIndex: Unsigned + PrimInt + 'static + AsPrimitive + Debug, + isize: AsPrimitive, + usize: AsPrimitive, +{ + let invalid_elem_count = ElemIndex::max_value().as_() + 2; + + let _ = ArrayInSHM::::create( + invalid_elem_count, + TEST_SEGMENT_PREFIX, + ) + .expect_err( + format!("must fail: element count {invalid_elem_count} is out of range for ElemIndex!") + .as_str(), + ); +} + +#[test] +fn arr_u8_index_invalid_elem_count() { + test_invalid_elem_index::(); +} + +#[test] +fn 
arr_u16_index_invalid_elem_count() { + test_invalid_elem_index::(); +} + +#[test] +fn arr_u32_index_invalid_elem_count() { + test_invalid_elem_index::(); +} diff --git a/commons/zenoh-shm/tests/posix_segment.rs b/commons/zenoh-shm/tests/posix_segment.rs new file mode 100644 index 0000000000..907f70cc4e --- /dev/null +++ b/commons/zenoh-shm/tests/posix_segment.rs @@ -0,0 +1,136 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{fmt::Display, slice}; + +use zenoh_shm::posix_shm::segment::Segment; + +pub mod common; +use common::{validate_memory, TEST_SEGMENT_PREFIX}; + +fn validate_segment(segment1: &Segment, segment2: &Segment) +where + rand::distributions::Standard: rand::distributions::Distribution, + ID: Clone + Display, +{ + assert!(segment1.len() == segment2.len()); + + let ptr1 = segment1.as_ptr(); + let ptr2 = segment2.as_ptr(); + + let slice1 = unsafe { slice::from_raw_parts_mut(ptr1, segment1.len()) }; + let slice2 = unsafe { slice::from_raw_parts(ptr2, segment2.len()) }; + + validate_memory(slice1, slice2); +} + +fn test_segment() +where + rand::distributions::Standard: rand::distributions::Distribution, + ID: Copy + Clone + Display, +{ + let new_segment: Segment = + Segment::create(900, TEST_SEGMENT_PREFIX).expect("error creating new segment"); + + let opened_segment_instance_1 = Segment::open(new_segment.id(), TEST_SEGMENT_PREFIX) + .expect("error opening existing segment!"); + + validate_segment(&new_segment, &opened_segment_instance_1); + + let opened_segment_instance_2 = Segment::open(new_segment.id(), TEST_SEGMENT_PREFIX) + .expect("error opening existing segment!"); + + validate_segment(&new_segment, &opened_segment_instance_1); + validate_segment(&new_segment, &opened_segment_instance_2); + + drop(opened_segment_instance_1); + validate_segment(&new_segment, &opened_segment_instance_2); +} + +/// UNSIGNED /// + +#[test] +fn segment_u8_id() { + test_segment::() +} + +#[test] +fn segment_u16_id() { + test_segment::() +} + +#[test] +fn segment_u32_id() { + test_segment::() +} + +#[test] +fn segment_u64_id() { + test_segment::() +} + +#[test] +fn segment_u128_id() { + test_segment::() +} + +/// SIGNED /// + +#[test] +fn segment_i8_id() { + test_segment::() +} + +#[test] +fn segment_i16_id() { + test_segment::() +} + +#[test] +fn segment_i32_id() { + test_segment::() +} + +#[test] +fn segment_i64_id() { + test_segment::() +} + +#[test] +fn segment_i128_id() { + test_segment::() +} + +/// Behaviour checks /// + +#[test] +fn segment_open() { + let new_segment: Segment = + Segment::create(900, TEST_SEGMENT_PREFIX).expect("error creating new segment"); + + let _opened_segment = Segment::open(new_segment.id(), TEST_SEGMENT_PREFIX) + .expect("error opening existing segment!"); +} + +#[test] +fn segment_open_error() { + let id = { + let new_segment: Segment = + Segment::create(900, TEST_SEGMENT_PREFIX).expect("error creating new segment"); + new_segment.id() + }; + + let _opened_segment = Segment::open(id, TEST_SEGMENT_PREFIX) + .expect_err("must fail: opened not existing segment!"); +} diff --git a/commons/zenoh-shm/tests/posix_shm_provider.rs 
b/commons/zenoh-shm/tests/posix_shm_provider.rs new file mode 100644 index 0000000000..4c27879623 --- /dev/null +++ b/commons/zenoh-shm/tests/posix_shm_provider.rs @@ -0,0 +1,117 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use zenoh_shm::api::{ + client::shared_memory_client::SharedMemoryClient, + protocol_implementations::posix::{ + posix_shared_memory_client::PosixSharedMemoryClient, + posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, + }, + provider::{ + shared_memory_provider_backend::SharedMemoryProviderBackend, + types::{AllocAlignment, MemoryLayout}, + }, +}; + +static BUFFER_NUM: usize = 100; +static BUFFER_SIZE: usize = 1024; + +#[test] +fn posix_shm_provider_create() { + let _backend = PosixSharedMemoryProviderBackend::builder() + .with_size(1024) + .expect("Error creating Layout!") + .res() + .expect("Error creating PosixSharedMemoryProviderBackend!"); +} + +#[test] +fn posix_shm_provider_alloc() { + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(1024) + .expect("Error creating Layout!") + .res() + .expect("Error creating PosixSharedMemoryProviderBackend!"); + + let layout = MemoryLayout::new(100, AllocAlignment::default()).unwrap(); + + let _buf = backend + .alloc(&layout) + .expect("PosixSharedMemoryProviderBackend: error allocating buffer"); +} + +#[test] +fn posix_shm_provider_open() { + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(1024) + .expect("Error creating Layout!") + .res() + .expect("Error creating PosixSharedMemoryProviderBackend!"); + + let layout = MemoryLayout::new(100, AllocAlignment::default()).unwrap(); + + let buf = backend + .alloc(&layout) + .expect("PosixSharedMemoryProviderBackend: error allocating buffer"); + + let client = PosixSharedMemoryClient {}; + + let _segment = client + .attach(buf.descriptor.segment) + .expect("Error attaching to segment"); +} + +#[test] +fn posix_shm_provider_allocator() { + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(BUFFER_SIZE * BUFFER_NUM) + .expect("Error creating Layout!") + .res() + .expect("Error creating PosixSharedMemoryProviderBackend!"); + + let layout = MemoryLayout::new(BUFFER_SIZE, AllocAlignment::default()).unwrap(); + + // exaust memory by allocating it all + let mut buffers = vec![]; + for _ in 0..BUFFER_NUM { + let buf = backend + .alloc(&layout) + .expect("PosixSharedMemoryProviderBackend: error allocating buffer"); + buffers.push(buf); + } + + for _ in 0..BUFFER_NUM { + // there is nothing to allocate at this point + assert_eq!(backend.available(), 0); + assert!(backend.alloc(&layout).is_err()); + + // free buffer + let to_free = buffers.pop().unwrap().descriptor; + backend.free(&to_free); + + // allocate new one + let buf = backend + .alloc(&layout) + .expect("PosixSharedMemoryProviderBackend: error allocating buffer"); + buffers.push(buf); + } + + // free everything + while let Some(buffer) = buffers.pop() { + backend.free(&buffer.descriptor); + } + + // confirm that allocator is free + assert_eq!(backend.available(), BUFFER_NUM * BUFFER_SIZE); +} diff --git a/commons/zenoh-shm/tests/watchdog.rs 
b/commons/zenoh-shm/tests/watchdog.rs new file mode 100644 index 0000000000..fe1ccd2ab2 --- /dev/null +++ b/commons/zenoh-shm/tests/watchdog.rs @@ -0,0 +1,311 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{ + sync::{atomic::AtomicBool, Arc}, + time::Duration, +}; + +use zenoh_result::{bail, ZResult}; +use zenoh_shm::watchdog::{ + confirmator::GLOBAL_CONFIRMATOR, storage::GLOBAL_STORAGE, validator::GLOBAL_VALIDATOR, +}; + +pub mod common; +use common::{execute_concurrent, CpuLoad}; + +const VALIDATION_PERIOD: Duration = Duration::from_millis(100); +const CONFIRMATION_PERIOD: Duration = Duration::from_millis(50); + +fn watchdog_alloc_fn() -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { + |_task_index: usize, _iteration: usize| -> ZResult<()> { + let _allocated = GLOBAL_STORAGE.allocate_watchdog()?; + Ok(()) + } +} + +#[test] +fn watchdog_alloc() { + execute_concurrent(1, 10000, watchdog_alloc_fn()); +} + +#[test] +fn watchdog_alloc_concurrent() { + execute_concurrent(1000, 10000, watchdog_alloc_fn()); +} + +fn watchdog_confirmed_fn() -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { + |_task_index: usize, _iteration: usize| -> ZResult<()> { + let allocated = GLOBAL_STORAGE.allocate_watchdog()?; + let confirmed = GLOBAL_CONFIRMATOR.add_owned(&allocated.descriptor)?; + + // check that the confirmed watchdog stays valid + for i in 0..10 { + std::thread::sleep(VALIDATION_PERIOD); + let valid = confirmed.owned.test_validate() != 0; + if !valid { + bail!("Invalid watchdog, iteration {i}"); + } + } + Ok(()) + } +} + +#[test] +#[ignore] +fn watchdog_confirmed() { + execute_concurrent(1, 10, watchdog_confirmed_fn()); +} + +#[test] +#[ignore] +fn watchdog_confirmed_concurrent() { + execute_concurrent(1000, 10, watchdog_confirmed_fn()); +} + +// TODO: confirmation to dangling watchdog actually writes to potentially-existing +// other watchdog instance from other test running in the same process and changes it's behaviour, +// so we cannot run dangling test in parallel with anything else +#[test] +#[ignore] +fn watchdog_confirmed_dangling() { + let allocated = GLOBAL_STORAGE + .allocate_watchdog() + .expect("error allocating watchdog!"); + let confirmed = GLOBAL_CONFIRMATOR + .add_owned(&allocated.descriptor) + .expect("error adding watchdog to confirmator!"); + drop(allocated); + + // confirm dangling (not allocated) watchdog + for _ in 0..10 { + std::thread::sleep(VALIDATION_PERIOD); + confirmed.owned.confirm(); + } +} + +fn watchdog_validated_fn() -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { + |_task_index: usize, _iteration: usize| -> ZResult<()> { + let allocated = GLOBAL_STORAGE.allocate_watchdog()?; + let confirmed = GLOBAL_CONFIRMATOR.add_owned(&allocated.descriptor)?; + + let valid = Arc::new(AtomicBool::new(true)); + { + let c_valid = valid.clone(); + GLOBAL_VALIDATOR.add( + allocated.descriptor.clone(), + Box::new(move || { + c_valid.store(false, std::sync::atomic::Ordering::SeqCst); + }), + ); + } + + // check that the watchdog stays valid as it is confirmed + for i in 0..10 { + 
std::thread::sleep(VALIDATION_PERIOD); + if !valid.load(std::sync::atomic::Ordering::SeqCst) { + bail!("Invalid watchdog, iteration {i}"); + } + } + + // Worst-case timings: + // validation: |___________|___________|___________|___________| + // confirmation: __|_____|_____|_____|_____| + // drop(confirmed): ^ + // It means that the worst-case latency for the watchdog to become invalid is VALIDATION_PERIOD*2 + + // check that the watchdog becomes invalid once we stop it's confirmation + drop(confirmed); + std::thread::sleep(VALIDATION_PERIOD * 3 + CONFIRMATION_PERIOD); + assert!(!valid.load(std::sync::atomic::Ordering::SeqCst)); + + Ok(()) + } +} + +#[test] +#[ignore] +fn watchdog_validated() { + execute_concurrent(1, 10, watchdog_validated_fn()); +} + +#[test] +#[ignore] +fn watchdog_validated_concurrent() { + execute_concurrent(1000, 10, watchdog_validated_fn()); +} + +fn watchdog_validated_invalid_without_confirmator_fn( +) -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { + |_task_index: usize, _iteration: usize| -> ZResult<()> { + let allocated = GLOBAL_STORAGE + .allocate_watchdog() + .expect("error allocating watchdog!"); + + let valid = Arc::new(AtomicBool::new(true)); + { + let c_valid = valid.clone(); + GLOBAL_VALIDATOR.add( + allocated.descriptor.clone(), + Box::new(move || { + c_valid.store(false, std::sync::atomic::Ordering::SeqCst); + }), + ); + } + + assert!(allocated.descriptor.test_validate() == 0); + + // check that the watchdog becomes invalid because we do not confirm it + std::thread::sleep(VALIDATION_PERIOD * 2 + CONFIRMATION_PERIOD); + assert!(!valid.load(std::sync::atomic::Ordering::SeqCst)); + Ok(()) + } +} + +#[test] +#[ignore] +fn watchdog_validated_invalid_without_confirmator() { + execute_concurrent(1, 10, watchdog_validated_invalid_without_confirmator_fn()); +} + +#[test] +#[ignore] +fn watchdog_validated_invalid_without_confirmator_concurrent() { + execute_concurrent( + 1000, + 10, + watchdog_validated_invalid_without_confirmator_fn(), + ); +} + +fn watchdog_validated_additional_confirmation_fn( +) -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { + |_task_index: usize, _iteration: usize| -> ZResult<()> { + let allocated = GLOBAL_STORAGE + .allocate_watchdog() + .expect("error allocating watchdog!"); + let confirmed = GLOBAL_CONFIRMATOR + .add_owned(&allocated.descriptor) + .expect("error adding watchdog to confirmator!"); + + let allow_invalid = Arc::new(AtomicBool::new(false)); + { + let c_allow_invalid = allow_invalid.clone(); + GLOBAL_VALIDATOR.add( + allocated.descriptor.clone(), + Box::new(move || { + assert!(c_allow_invalid.load(std::sync::atomic::Ordering::SeqCst)); + c_allow_invalid.store(false, std::sync::atomic::Ordering::SeqCst); + }), + ); + } + + // make additional confirmations + for _ in 0..100 { + std::thread::sleep(VALIDATION_PERIOD / 10); + confirmed.owned.confirm(); + } + + // check that the watchdog stays valid as we stop additional confirmation + std::thread::sleep(VALIDATION_PERIOD * 10); + + // Worst-case timings: + // validation: |___________|___________|___________|___________| + // confirmation: __|_____|_____|_____|_____| + // drop(confirmed): ^ + // It means that the worst-case latency for the watchdog to become invalid is VALIDATION_PERIOD*2 + + // check that the watchdog becomes invalid once we stop it's regular confirmation + drop(confirmed); + allow_invalid.store(true, std::sync::atomic::Ordering::SeqCst); + std::thread::sleep(VALIDATION_PERIOD * 2 + CONFIRMATION_PERIOD); + // 
check that invalidation event happened! + assert!(!allow_invalid.load(std::sync::atomic::Ordering::SeqCst)); + Ok(()) + } +} + +#[test] +#[ignore] +fn watchdog_validated_additional_confirmation() { + execute_concurrent(1, 10, watchdog_validated_additional_confirmation_fn()); +} + +#[test] +#[ignore] +fn watchdog_validated_additional_confirmation_concurrent() { + execute_concurrent(1000, 10, watchdog_validated_additional_confirmation_fn()); +} + +fn watchdog_validated_overloaded_system_fn( +) -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { + |_task_index: usize, _iteration: usize| -> ZResult<()> { + let allocated = GLOBAL_STORAGE + .allocate_watchdog() + .expect("error allocating watchdog!"); + let confirmed = GLOBAL_CONFIRMATOR + .add_owned(&allocated.descriptor) + .expect("error adding watchdog to confirmator!"); + + let allow_invalid = Arc::new(AtomicBool::new(false)); + { + let c_allow_invalid = allow_invalid.clone(); + GLOBAL_VALIDATOR.add( + allocated.descriptor.clone(), + Box::new(move || { + assert!(c_allow_invalid.load(std::sync::atomic::Ordering::SeqCst)); + c_allow_invalid.store(false, std::sync::atomic::Ordering::SeqCst); + }), + ); + } + + // check that the watchdog stays valid + std::thread::sleep(VALIDATION_PERIOD * 10); + + // Worst-case timings: + // validation: |___________|___________|___________|___________| + // confirmation: __|_____|_____|_____|_____| + // drop(confirmed): ^ + // It means that the worst-case latency for the watchdog to become invalid is VALIDATION_PERIOD*2 + + // check that the watchdog becomes invalid once we stop it's regular confirmation + drop(confirmed); + allow_invalid.store(true, std::sync::atomic::Ordering::SeqCst); + std::thread::sleep(VALIDATION_PERIOD * 2 + CONFIRMATION_PERIOD); + // check that invalidation event happened! + assert!(!allow_invalid.load(std::sync::atomic::Ordering::SeqCst)); + Ok(()) + } +} + +#[test] +#[ignore] +fn watchdog_validated_low_load() { + let _load = CpuLoad::low(); + execute_concurrent(1000, 10, watchdog_validated_overloaded_system_fn()); +} + +#[test] +#[ignore] +fn watchdog_validated_high_load() { + let _load = CpuLoad::optimal_high(); + execute_concurrent(1000, 10, watchdog_validated_overloaded_system_fn()); +} + +#[test] +#[ignore] +fn watchdog_validated_overloaded_system() { + let _load = CpuLoad::exessive(); + execute_concurrent(1000, 10, watchdog_validated_overloaded_system_fn()); +} diff --git a/commons/zenoh-sync/src/object_pool.rs b/commons/zenoh-sync/src/object_pool.rs index 83b673c449..3386b2058b 100644 --- a/commons/zenoh-sync/src/object_pool.rs +++ b/commons/zenoh-sync/src/object_pool.rs @@ -141,10 +141,12 @@ impl ZSliceBuffer for RecyclingObject> { fn as_slice(&self) -> &[u8] { self.as_ref() } - fn as_mut_slice(&mut self) -> &mut [u8] { - self.as_mut() - } + fn as_any(&self) -> &dyn Any { self } + + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } } diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 2027133a1e..e117507ae9 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -27,19 +27,10 @@ readme = "README.md" publish = false [features] -shared-memory = ["zenoh-shm", "zenoh/shared-memory"] +shared-memory = ["zenoh/shared-memory"] unstable = ["zenoh/unstable"] transport_unixpipe = ["zenoh/transport_unixpipe"] -# Unfortunately, the feature "transport_unixpipe" is always -# enabled for the lines below. 
It looks like a Cargo bug :( -# -# [target.'cfg(unix)'.dependencies] -# zenoh = { workspace = true, features = ["transport_unixpipe"] } -# -# [target.'cfg(not(unix))'.dependencies] -# zenoh = { workspace = true } - [dependencies] tokio = { workspace = true, features = ["rt-multi-thread", "time", "io-std"] } clap = { workspace = true, features = ["derive"] } @@ -52,7 +43,6 @@ zenoh-collections = { workspace = true } tracing = { workspace = true } zenoh = { workspace = true, default-features = true } zenoh-ext = { workspace = true } -zenoh-shm = { workspace = true, optional = true } [dev-dependencies] rand = { workspace = true, features = ["default"] } @@ -91,7 +81,7 @@ path = "examples/z_pub.rs" [[example]] name = "z_pub_shm" path = "examples/z_pub_shm.rs" -required-features = ["shared-memory"] +required-features = ["unstable", "shared-memory"] [[example]] name = "z_sub" @@ -100,7 +90,7 @@ path = "examples/z_sub.rs" [[example]] name = "z_sub_shm" path = "examples/z_sub_shm.rs" -required-features = ["shared-memory"] +required-features = ["unstable", "shared-memory"] [[example]] name = "z_pull" @@ -148,12 +138,22 @@ path = "examples/z_sub_thr.rs" [[example]] name = "z_pub_shm_thr" path = "examples/z_pub_shm_thr.rs" -required-features = ["shared-memory"] +required-features = ["unstable", "shared-memory"] [[example]] name = "z_ping" path = "examples/z_ping.rs" +[[example]] +name = "z_ping_shm" +path = "examples/z_ping_shm.rs" +required-features = ["unstable", "shared-memory"] + [[example]] name = "z_pong" path = "examples/z_pong.rs" + +[[example]] +name = "z_alloc_shm" +path = "examples/z_alloc_shm.rs" +required-features = ["unstable", "shared-memory"] \ No newline at end of file diff --git a/examples/examples/z_alloc_shm.rs b/examples/examples/z_alloc_shm.rs new file mode 100644 index 0000000000..a6afb1190c --- /dev/null +++ b/examples/examples/z_alloc_shm.rs @@ -0,0 +1,136 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use zenoh::prelude::r#async::*; +use zenoh::shm::protocol_implementations::posix::posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend; +use zenoh::shm::protocol_implementations::posix::protocol_id::POSIX_PROTOCOL_ID; +use zenoh::shm::provider::shared_memory_provider::{ + BlockOn, GarbageCollect, SharedMemoryProviderBuilder, +}; +use zenoh::shm::provider::shared_memory_provider::{Deallocate, Defragment}; +use zenoh::shm::provider::types::{AllocAlignment, MemoryLayout}; +use zenoh::Result; + +#[tokio::main] +async fn main() { + // Initiate logging + zenoh_util::try_init_log_from_env(); + run().await.unwrap() +} + +async fn run() -> Result<()> { + // Construct an SHM backend + let backend = { + // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. + // The initialisation of SHM backend is completely backend-specific and user is free to do + // anything reasonable here. 
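    // A minimal sketch, assuming the same builder API exercised by the posix_shm_provider
    // tests in this patch: the backend can also be created from a plain size, letting the
    // builder derive the layout (presumably with a default alignment), e.g.
    //
    //     let _alt_backend = PosixSharedMemoryProviderBackend::builder()
    //         .with_size(65536)
    //         .expect("Error creating Layout!")
    //         .res()
    //         .expect("Error creating PosixSharedMemoryProviderBackend!");
    //
    // (`_alt_backend` is only an illustrative name.) The explicit MemoryLayout below is used
    // instead so that the alignment choice stays visible.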
This code is execuated at the provider's first use + + // Alignment for POSIX SHM provider + // All allocations will be aligned corresponding to this alignment - + // that means that the provider will be able to satisfy allocation layouts + // with alignment <= provider_alignment + let provider_alignment = AllocAlignment::default(); + + // Create layout for POSIX Provider's memory + let provider_layout = MemoryLayout::new(65536, provider_alignment).unwrap(); + + PosixSharedMemoryProviderBackend::builder() + .with_layout(provider_layout) + .res() + .unwrap() + }; + + // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID + let shared_memory_provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + // Create a layout for particular allocation arguments and particular SHM provider + // The layout is validated for argument correctness and also is checked + // against particular SHM provider's layouting capabilities. + // This layout is reusable and can handle series of similar allocations + let buffer_layout = { + // OPTION 1: Simple (default) configuration: + let simple_layout = shared_memory_provider + .alloc_layout() + .size(512) + .res() + .unwrap(); + + // OPTION 2: Comprehensive configuration: + let _comprehensive_layout = shared_memory_provider + .alloc_layout() + .size(512) + .alignment(AllocAlignment::new(2)) + .res() + .unwrap(); + + simple_layout + }; + + // Allocate SharedMemoryBuf + // Policy is a generics-based API to describe necessary allocation behaviour + // that will be higly optimized at compile-time. + // Policy resolvable can be sync and async. + // The basic policies are: + // -JustAlloc (sync) + // -GarbageCollect (sync) + // -Deallocate (sync) + // --contains own set of dealloc policy generics: + // ---DeallocateYoungest + // ---DeallocateEldest + // ---DeallocateOptimal + // -BlockOn (sync and async) + let mut sbuf = async { + // Some examples on how to use layout's interface: + + // The default allocation with default JustAlloc policy + let default_alloc = buffer_layout.alloc().res().unwrap(); + + // The async allocation + let _async_alloc = buffer_layout + .alloc() + .with_policy::() + .res_async() + .await + .unwrap(); + + // The comprehensive allocation policy that blocks if provider is not able to allocate + let _comprehensive_alloc = buffer_layout + .alloc() + .with_policy::>>() + .res() + .unwrap(); + + // The comprehensive allocation policy that deallocates up to 1000 buffers if provider is not able to allocate + let _comprehensive_alloc = buffer_layout + .alloc() + .with_policy::>>() + .res() + .unwrap(); + + default_alloc + } + .await; + + // Fill recently-allocated buffer with data + sbuf[0..8].fill(0); + + // Declare Session and Publisher (common code) + let session = zenoh::open(Config::default()).res_async().await?; + let publisher = session.declare_publisher("my/key/expr").res_async().await?; + + // Publish SHM buffer + publisher.put(sbuf).res_async().await +} diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs new file mode 100644 index 0000000000..08c08276d4 --- /dev/null +++ b/examples/examples/z_ping_shm.rs @@ -0,0 +1,147 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at 
https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use clap::Parser; +use std::time::{Duration, Instant}; +use zenoh::buffers::ZSlice; +use zenoh::config::Config; +use zenoh::prelude::sync::*; +use zenoh::publication::CongestionControl; +use zenoh::shm::protocol_implementations::posix::{ + posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, + protocol_id::POSIX_PROTOCOL_ID, +}; +use zenoh::shm::provider::shared_memory_provider::SharedMemoryProviderBuilder; +use zenoh::shm::provider::types::AllocAlignment; +use zenoh::shm::provider::types::MemoryLayout; +use zenoh_examples::CommonArgs; + +fn main() { + // Initiate logging + zenoh_util::try_init_log_from_env(); + + let (mut config, warmup, size, n) = parse_args(); + + // A probing procedure for shared memory is performed upon session opening. To enable `z_ping_shm` to operate + // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the + // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. + config.transport.shared_memory.set_enabled(true).unwrap(); + + let session = zenoh::open(config).res().unwrap(); + + // The key expression to publish data on + let key_expr_ping = keyexpr::new("test/ping").unwrap(); + + // The key expression to wait the response back + let key_expr_pong = keyexpr::new("test/pong").unwrap(); + + let sub = session.declare_subscriber(key_expr_pong).res().unwrap(); + let publisher = session + .declare_publisher(key_expr_ping) + .congestion_control(CongestionControl::Block) + .res() + .unwrap(); + + let mut samples = Vec::with_capacity(n); + + // Construct an SHM backend + let backend = { + // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. + // The initialisation of SHM backend is completely backend-specific and user is free to do + // anything reasonable here. 
This code is execuated at the provider's first use + + // Alignment for POSIX SHM provider + // All allocations will be aligned corresponding to this alignment - + // that means that the provider will be able to satisfy allocation layouts + // with alignment <= provider_alignment + let provider_alignment = AllocAlignment::default(); + + // Create layout for POSIX Provider's memory + let provider_layout = MemoryLayout::new(size, provider_alignment).unwrap(); + + PosixSharedMemoryProviderBackend::builder() + .with_layout(provider_layout) + .res() + .unwrap() + }; + + // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID + let shared_memory_provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + let buf = shared_memory_provider + .alloc_layout() + .size(size) + .res() + .unwrap() + .alloc() + .res() + .unwrap(); + + // convert ZSliceShmMut into ZSlice as ZSliceShmMut does not support Clone + let buf: ZSlice = buf.into(); + + // -- warmup -- + println!("Warming up for {warmup:?}..."); + let now = Instant::now(); + while now.elapsed() < warmup { + publisher.put(buf.clone()).res().unwrap(); + let _ = sub.recv().unwrap(); + } + + for _ in 0..n { + let buf = buf.clone(); + let write_time = Instant::now(); + publisher.put(buf).res().unwrap(); + + let _ = sub.recv(); + let ts = write_time.elapsed().as_micros(); + samples.push(ts); + } + + for (i, rtt) in samples.iter().enumerate().take(n) { + println!( + "{} bytes: seq={} rtt={:?}µs lat={:?}µs", + size, + i, + rtt, + rtt / 2 + ); + } +} + +#[derive(Parser)] +struct Args { + #[arg(short, long, default_value = "1")] + /// The number of seconds to warm up (float) + warmup: f64, + #[arg(short = 'n', long, default_value = "100")] + /// The number of round-trips to measure + samples: usize, + /// Sets the size of the payload to publish + payload_size: usize, + #[command(flatten)] + common: CommonArgs, +} + +fn parse_args() -> (Config, Duration, usize, usize) { + let args = Args::parse(); + ( + args.common.into(), + Duration::from_secs_f64(args.warmup), + args.payload_size, + args.samples, + ) +} diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index aa691f0425..4c2e41ab18 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -21,7 +21,12 @@ fn main() { // initiate logging zenoh_util::try_init_log_from_env(); - let (config, express) = parse_args(); + let (mut config, express) = parse_args(); + + // A probing procedure for shared memory is performed upon session opening. To enable `z_ping_shm` to operate + // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the + // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. 
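    // A minimal sketch of the peer side, assuming only the config calls already used in these
    // examples: the matching `z_ping_shm` example enables the flag in exactly the same way
    // before opening its session,
    //
    //     config.transport.shared_memory.set_enabled(true).unwrap();
    //     let session = zenoh::open(config).res().unwrap();
    //
    // If either peer leaves shared memory disabled, the probing does not succeed and the pair
    // falls back to the regular network path.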
+ config.transport.shared_memory.set_enabled(true).unwrap(); let session = zenoh::open(config).res().unwrap().into_arc(); diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index 542cff3b6d..8287509f1b 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -12,10 +12,16 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; -use zenoh::shm::SharedMemoryManager; +use zenoh::shm::protocol_implementations::posix::{ + posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, + protocol_id::POSIX_PROTOCOL_ID, +}; +use zenoh::shm::provider::shared_memory_provider::SharedMemoryProviderBuilder; +use zenoh::shm::provider::shared_memory_provider::{BlockOn, GarbageCollect}; +use zenoh::shm::provider::types::AllocAlignment; +use zenoh::shm::provider::types::MemoryLayout; use zenoh_examples::CommonArgs; const N: usize = 10; @@ -36,72 +42,71 @@ async fn main() -> Result<(), zenoh::Error> { println!("Opening session..."); let session = zenoh::open(config).res().await.unwrap(); - println!("Creating Shared Memory Manager..."); - let id = session.zid(); - let mut shm = SharedMemoryManager::make(id.to_string(), N * 1024).unwrap(); + println!("Creating POSIX SHM backend..."); + // Construct an SHM backend + let backend = { + // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. + // The initialisation of SHM backend is completely backend-specific and user is free to do + // anything reasonable here. This code is execuated at the provider's first use + + // Alignment for POSIX SHM provider + // All allocations will be aligned corresponding to this alignment - + // that means that the provider will be able to satisfy allocation layouts + // with alignment <= provider_alignment + let provider_alignment = AllocAlignment::default(); + + // Create layout for POSIX Provider's memory + let provider_layout = MemoryLayout::new(N * 1024, provider_alignment).unwrap(); + + PosixSharedMemoryProviderBackend::builder() + .with_layout(provider_layout) + .res() + .unwrap() + }; + + println!("Creating SHM Provider with POSIX backend..."); + // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID + let shared_memory_provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); - println!("Allocating Shared Memory Buffer..."); let publisher = session.declare_publisher(&path).res().await.unwrap(); + println!("Allocating Shared Memory Buffer..."); + let layout = shared_memory_provider + .alloc_layout() + .size(1024) + .res() + .unwrap(); + println!("Press CTRL-C to quit..."); for idx in 0..(K * N as u32) { - tokio::time::sleep(Duration::from_secs(1)).await; - let mut sbuf = match shm.alloc(1024) { - Ok(buf) => buf, - Err(_) => { - tokio::time::sleep(Duration::from_millis(100)).await; - println!( - "Afer failing allocation the GC collected: {} bytes -- retrying", - shm.garbage_collect() - ); - println!( - "Trying to de-fragment memory... De-fragmented {} bytes", - shm.defragment() - ); - shm.alloc(1024).unwrap() - } - }; + let mut sbuf = layout + .alloc() + .with_policy::>() + .res_async() + .await + .unwrap(); // We reserve a small space at the beginning of the buffer to include the iteration index // of the write. This is simply to have the same format as zn_pub. let prefix = format!("[{idx:4}] "); let prefix_len = prefix.as_bytes().len(); - - // Retrive a mutable slice from the SharedMemoryBuf. 
- // - // This operation is marked unsafe since we cannot guarantee a single mutable reference - // across multiple processes. Thus if you use it, and you'll inevitable have to use it, - // you have to keep in mind that if you have multiple process retrieving a mutable slice - // you may get into concurrent writes. That said, if you have a serial pipeline and - // the buffer is flowing through the pipeline this will not create any issues. - // - // In short, whilst this operation is marked as unsafe, you are safe if you can - // guarantee that in your application only one process at the time will actually write. - let slice = unsafe { sbuf.as_mut_slice() }; let slice_len = prefix_len + value.as_bytes().len(); - slice[0..prefix_len].copy_from_slice(prefix.as_bytes()); - slice[prefix_len..slice_len].copy_from_slice(value.as_bytes()); + + sbuf[0..prefix_len].copy_from_slice(prefix.as_bytes()); + sbuf[prefix_len..slice_len].copy_from_slice(value.as_bytes()); // Write the data println!( "Put SHM Data ('{}': '{}')", path, - String::from_utf8_lossy(&slice[0..slice_len]) + String::from_utf8_lossy(&sbuf[0..slice_len]) ); - publisher.put(sbuf.clone()).res().await?; - if idx % K == 0 { - let freed = shm.garbage_collect(); - println!("The Gargabe collector freed {freed} bytes"); - let defrag = shm.defragment(); - println!("De-framented {defrag} bytes"); - } - // Dropping the SharedMemoryBuf means to free it. - drop(sbuf); + publisher.put(sbuf).res().await?; } - // Signal the SharedMemoryManager to garbage collect all the freed SharedMemoryBuf. - let _freed = shm.garbage_collect(); - Ok(()) } diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index 437f6b2d6d..90c1707765 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -12,10 +12,17 @@ // ZettaScale Zenoh Team, // use clap::Parser; +use zenoh::buffers::ZSlice; use zenoh::config::Config; use zenoh::prelude::r#async::*; use zenoh::publication::CongestionControl; -use zenoh::shm::SharedMemoryManager; +use zenoh::shm::protocol_implementations::posix::{ + posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, + protocol_id::POSIX_PROTOCOL_ID, +}; +use zenoh::shm::provider::shared_memory_provider::SharedMemoryProviderBuilder; +use zenoh::shm::provider::types::AllocAlignment; +use zenoh::shm::provider::types::MemoryLayout; use zenoh_examples::CommonArgs; #[tokio::main] @@ -30,11 +37,44 @@ async fn main() { config.transport.shared_memory.set_enabled(true).unwrap(); let z = zenoh::open(config).res().await.unwrap(); - let id = z.zid(); - let mut shm = SharedMemoryManager::make(id.to_string(), sm_size).unwrap(); - let mut buf = shm.alloc(size).unwrap(); - let bs = unsafe { buf.as_mut_slice() }; - for b in bs { + + // Construct an SHM backend + let backend = { + // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. + // The initialisation of SHM backend is completely backend-specific and user is free to do + // anything reasonable here. 
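    // A short note on sizing, assuming the provider API shown above in `z_alloc_shm` and
    // `z_ping_shm`: this block mirrors their backend setup, except that the provider segment
    // is sized with `sm_size`, and the single `size`-byte buffer allocated further down is
    // drawn from that segment, so `size` is expected to fit within `sm_size`.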
This code is execuated at the provider's first use + + // Alignment for POSIX SHM provider + // All allocations will be aligned corresponding to this alignment - + // that means that the provider will be able to satisfy allocation layouts + // with alignment <= provider_alignment + let provider_alignment = AllocAlignment::default(); + + // Create layout for POSIX Provider's memory + let provider_layout = MemoryLayout::new(sm_size, provider_alignment).unwrap(); + + PosixSharedMemoryProviderBackend::builder() + .with_layout(provider_layout) + .res() + .unwrap() + }; + + // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID + let shared_memory_provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + let mut buf = shared_memory_provider + .alloc_layout() + .size(size) + .res() + .unwrap() + .alloc() + .res() + .unwrap(); + + for b in buf.as_mut() { *b = rand::random::(); } @@ -42,6 +82,8 @@ async fn main() { // Make sure to not drop messages because of congestion control .congestion_control(CongestionControl::Block).res().await.unwrap(); + let buf: ZSlice = buf.into(); + println!("Press CTRL-C to quit..."); loop { publisher.put(buf.clone()).res().await.unwrap(); diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index d304d6a7f6..aa3967becd 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -14,8 +14,8 @@ use clap::Parser; use zenoh::config::Config; use zenoh::prelude::r#async::*; +use zenoh::shm::slice::zsliceshm::zsliceshm; use zenoh_examples::CommonArgs; -use zenoh_shm::SharedMemoryBuf; #[tokio::main] async fn main() { @@ -37,12 +37,12 @@ async fn main() { println!("Press CTRL-C to quit..."); while let Ok(sample) = subscriber.recv_async().await { - match sample.payload().deserialize::() { + match sample.payload().deserialize::<&zsliceshm>() { Ok(payload) => println!( ">> [Subscriber] Received {} ('{}': '{:02x?}')", sample.kind(), sample.key_expr().as_str(), - payload.as_slice() + payload ), Err(e) => { println!(">> [Subscriber] Not a SharedMemoryBuf: {:?}", e); diff --git a/io/zenoh-transport/Cargo.toml b/io/zenoh-transport/Cargo.toml index b3a299e8be..7efaabb719 100644 --- a/io/zenoh-transport/Cargo.toml +++ b/io/zenoh-transport/Cargo.toml @@ -29,6 +29,7 @@ shared-memory = [ "zenoh-protocol/shared-memory", "zenoh-shm", "zenoh-codec/shared-memory", + "zenoh-buffers/shared-memory", ] auth_pubkey = ["transport_auth", "rsa"] auth_usrpwd = ["transport_auth"] diff --git a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index e923a7e1af..8048d9ff49 100644 --- a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -423,7 +423,7 @@ impl RBatch { pub fn initialize(&mut self, #[allow(unused_variables)] buff: C) -> ZResult<()> where C: Fn() -> T + Copy, - T: ZSliceBuffer + 'static, + T: AsMut<[u8]> + ZSliceBuffer + 'static, { #[allow(unused_variables)] let (l, h, p) = Self::split(self.buffer.as_slice(), &self.config); @@ -455,10 +455,10 @@ impl RBatch { #[cfg(feature = "transport_compression")] fn decompress(&self, payload: &[u8], mut buff: impl FnMut() -> T) -> ZResult where - T: ZSliceBuffer + 'static, + T: AsMut<[u8]> + ZSliceBuffer + 'static, { let mut into = (buff)(); - let n = lz4_flex::block::decompress_into(payload, into.as_mut_slice()) + let n = lz4_flex::block::decompress_into(payload, into.as_mut()) .map_err(|_| zerror!("Decompression error"))?; let zslice = ZSlice::new(Arc::new(into), 0, n) 
.map_err(|_| zerror!("Invalid decompression buffer length"))?; diff --git a/io/zenoh-transport/src/manager.rs b/io/zenoh-transport/src/manager.rs index 73a38545db..ddf1fe23c1 100644 --- a/io/zenoh-transport/src/manager.rs +++ b/io/zenoh-transport/src/manager.rs @@ -33,6 +33,10 @@ use zenoh_protocol::{ VERSION, }; use zenoh_result::{bail, ZResult}; +#[cfg(feature = "shared-memory")] +use zenoh_shm::api::client_storage::GLOBAL_CLIENT_STORAGE; +#[cfg(feature = "shared-memory")] +use zenoh_shm::reader::SharedMemoryReader; use zenoh_task::TaskController; /// # Examples @@ -133,9 +137,17 @@ pub struct TransportManagerBuilder { endpoints: HashMap, // (protocol, config) tx_threads: usize, protocols: Option>, + #[cfg(feature = "shared-memory")] + shm_reader: Option, } impl TransportManagerBuilder { + #[cfg(feature = "shared-memory")] + pub fn shm_reader(mut self, shm_reader: Option) -> Self { + self.shm_reader = shm_reader; + self + } + pub fn zid(mut self, zid: ZenohId) -> Self { self.zid = zid; self @@ -251,7 +263,16 @@ impl TransportManagerBuilder { // Initialize the PRNG and the Cipher let mut prng = PseudoRng::from_entropy(); - let unicast = self.unicast.build(&mut prng)?; + #[cfg(feature = "shared-memory")] + let shm_reader = self + .shm_reader + .unwrap_or_else(|| SharedMemoryReader::new(GLOBAL_CLIENT_STORAGE.clone())); + + let unicast = self.unicast.build( + &mut prng, + #[cfg(feature = "shared-memory")] + &shm_reader, + )?; let multicast = self.multicast.build()?; let mut queue_size = [0; Priority::NUM]; @@ -295,7 +316,12 @@ impl TransportManagerBuilder { let params = TransportManagerParams { config, state }; - Ok(TransportManager::new(params, prng)) + Ok(TransportManager::new( + params, + prng, + #[cfg(feature = "shared-memory")] + shm_reader, + )) } } @@ -321,6 +347,8 @@ impl Default for TransportManagerBuilder { multicast: TransportManagerBuilderMulticast::default(), tx_threads: 1, protocols: None, + #[cfg(feature = "shared-memory")] + shm_reader: None, } } } @@ -333,13 +361,19 @@ pub struct TransportManager { pub(crate) cipher: Arc, pub(crate) locator_inspector: zenoh_link::LocatorInspector, pub(crate) new_unicast_link_sender: NewLinkChannelSender, + #[cfg(feature = "shared-memory")] + pub(crate) shmr: SharedMemoryReader, #[cfg(feature = "stats")] pub(crate) stats: Arc, pub(crate) task_controller: TaskController, } impl TransportManager { - pub fn new(params: TransportManagerParams, mut prng: PseudoRng) -> TransportManager { + pub fn new( + params: TransportManagerParams, + mut prng: PseudoRng, + #[cfg(feature = "shared-memory")] shmr: SharedMemoryReader, + ) -> TransportManager { // Initialize the Cipher let mut key = [0_u8; BlockCipher::BLOCK_SIZE]; prng.fill_bytes(&mut key); @@ -357,6 +391,8 @@ impl TransportManager { new_unicast_link_sender, #[cfg(feature = "stats")] stats: std::sync::Arc::new(crate::stats::TransportStats::default()), + #[cfg(feature = "shared-memory")] + shmr, task_controller: TaskController::default(), }; diff --git a/io/zenoh-transport/src/multicast/link.rs b/io/zenoh-transport/src/multicast/link.rs index 30a166b273..883f978684 100644 --- a/io/zenoh-transport/src/multicast/link.rs +++ b/io/zenoh-transport/src/multicast/link.rs @@ -205,12 +205,12 @@ impl TransportLinkMulticastRx { pub async fn recv_batch(&self, buff: C) -> ZResult<(RBatch, Locator)> where C: Fn() -> T + Copy, - T: ZSliceBuffer + 'static, + T: AsMut<[u8]> + ZSliceBuffer + 'static, { const ERR: &str = "Read error from link: "; let mut into = (buff)(); - let (n, locator) = 
self.inner.link.read(into.as_mut_slice()).await?; + let (n, locator) = self.inner.link.read(into.as_mut()).await?; let buffer = ZSlice::new(Arc::new(into), 0, n).map_err(|_| zerror!("Error"))?; let mut batch = RBatch::new(self.inner.config.batch, buffer); batch.initialize(buff).map_err(|_| zerror!("{ERR}{self}"))?; @@ -539,7 +539,7 @@ async fn rx_task( where T: ZSliceBuffer + 'static, F: Fn() -> T, - RecyclingObject: ZSliceBuffer, + RecyclingObject: AsMut<[u8]> + ZSliceBuffer, { let (rbatch, locator) = link .recv_batch(|| pool.try_take().unwrap_or_else(|| pool.alloc())) diff --git a/io/zenoh-transport/src/multicast/manager.rs b/io/zenoh-transport/src/multicast/manager.rs index c2f0642579..ebc51a2ec6 100644 --- a/io/zenoh-transport/src/multicast/manager.rs +++ b/io/zenoh-transport/src/multicast/manager.rs @@ -11,8 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(feature = "shared-memory")] -use crate::multicast::shm::SharedMemoryMulticast; use crate::multicast::{transport::TransportMulticastInner, TransportMulticast}; use crate::TransportManager; use std::collections::HashMap; @@ -61,9 +59,6 @@ pub struct TransportManagerStateMulticast { pub(crate) protocols: Arc>>, // Established transports pub(crate) transports: Arc>>>, - // Shared memory - #[cfg(feature = "shared-memory")] - pub(super) shm: Arc, } pub struct TransportManagerParamsMulticast { @@ -143,8 +138,6 @@ impl TransportManagerBuilderMulticast { let state = TransportManagerStateMulticast { protocols: Arc::new(Mutex::new(HashMap::new())), transports: Arc::new(Mutex::new(HashMap::new())), - #[cfg(feature = "shared-memory")] - shm: Arc::new(SharedMemoryMulticast::make()?), }; let params = TransportManagerParamsMulticast { config, state }; diff --git a/io/zenoh-transport/src/multicast/mod.rs b/io/zenoh-transport/src/multicast/mod.rs index daf9b069ff..e205125b39 100644 --- a/io/zenoh-transport/src/multicast/mod.rs +++ b/io/zenoh-transport/src/multicast/mod.rs @@ -14,8 +14,6 @@ pub(crate) mod establishment; pub(crate) mod link; pub(crate) mod manager; pub(crate) mod rx; -#[cfg(feature = "shared-memory")] -pub(crate) mod shm; pub(crate) mod transport; pub(crate) mod tx; diff --git a/io/zenoh-transport/src/multicast/rx.rs b/io/zenoh-transport/src/multicast/rx.rs index c7b1d65e59..1576d65cd6 100644 --- a/io/zenoh-transport/src/multicast/rx.rs +++ b/io/zenoh-transport/src/multicast/rx.rs @@ -42,7 +42,7 @@ impl TransportMulticastInner { #[cfg(feature = "shared-memory")] { if self.manager.config.multicast.is_shm { - crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.state.multicast.shm.reader)?; + crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shmr)?; } } diff --git a/io/zenoh-transport/src/multicast/shm.rs b/io/zenoh-transport/src/multicast/shm.rs deleted file mode 100644 index 060198d927..0000000000 --- a/io/zenoh-transport/src/multicast/shm.rs +++ /dev/null @@ -1,44 +0,0 @@ -// -// Copyright (c) 2022 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
-// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use rand::{Rng, SeedableRng}; -use tokio::sync::RwLock; -use zenoh_crypto::PseudoRng; -use zenoh_result::ZResult; -use zenoh_shm::{SharedMemoryManager, SharedMemoryReader}; - -pub(crate) type Challenge = u64; -const NAME: &str = "zshm_mcast"; - -pub(crate) struct SharedMemoryMulticast { - pub(crate) _manager: SharedMemoryManager, - pub(crate) reader: RwLock, -} - -unsafe impl Sync for SharedMemoryMulticast {} - -impl SharedMemoryMulticast { - pub fn make() -> ZResult { - let mut prng = PseudoRng::from_entropy(); - let nonce = prng.gen::(); - let size = std::mem::size_of::(); - - let mut _manager = SharedMemoryManager::make(format!("{NAME}.{nonce}"), size)?; - - let shmauth = SharedMemoryMulticast { - _manager, - reader: RwLock::new(SharedMemoryReader::new()), - }; - Ok(shmauth) - } -} diff --git a/io/zenoh-transport/src/multicast/transport.rs b/io/zenoh-transport/src/multicast/transport.rs index 155b6b5568..babf68ce61 100644 --- a/io/zenoh-transport/src/multicast/transport.rs +++ b/io/zenoh-transport/src/multicast/transport.rs @@ -13,6 +13,8 @@ // use super::common::priority::{TransportPriorityRx, TransportPriorityTx}; use super::link::{TransportLinkMulticastConfigUniversal, TransportLinkMulticastUniversal}; +#[cfg(feature = "shared-memory")] +use crate::shm::MulticastTransportShmConfig; #[cfg(feature = "stats")] use crate::stats::TransportStats; use crate::{ @@ -88,6 +90,8 @@ pub(crate) struct TransportMulticastInner { // Transport statistics #[cfg(feature = "stats")] pub(super) stats: Arc, + #[cfg(feature = "shared-memory")] + pub(super) shm: Option, } impl TransportMulticastInner { @@ -109,6 +113,12 @@ impl TransportMulticastInner { #[cfg(feature = "stats")] let stats = Arc::new(TransportStats::new(Some(manager.get_stats().clone()))); + #[cfg(feature = "shared-memory")] + let shm = match manager.config.multicast.is_shm { + true => Some(MulticastTransportShmConfig), + false => None, + }; + let ti = TransportMulticastInner { manager, priority_tx: priority_tx.into_boxed_slice().into(), @@ -119,6 +129,8 @@ impl TransportMulticastInner { task_controller: TaskController::default(), #[cfg(feature = "stats")] stats, + #[cfg(feature = "shared-memory")] + shm, }; let link = TransportLinkMulticastUniversal::new(ti.clone(), config.link); diff --git a/io/zenoh-transport/src/multicast/tx.rs b/io/zenoh-transport/src/multicast/tx.rs index 3b58277402..ee7715d38b 100644 --- a/io/zenoh-transport/src/multicast/tx.rs +++ b/io/zenoh-transport/src/multicast/tx.rs @@ -15,6 +15,9 @@ use super::transport::TransportMulticastInner; use zenoh_core::zread; use zenoh_protocol::network::NetworkMessage; +#[cfg(feature = "shared-memory")] +use crate::shm::map_zmsg_to_partner; + //noinspection ALL impl TransportMulticastInner { fn schedule_on_link(&self, msg: NetworkMessage) -> bool { @@ -53,12 +56,7 @@ impl TransportMulticastInner { pub(super) fn schedule(&self, mut msg: NetworkMessage) -> bool { #[cfg(feature = "shared-memory")] { - let res = if self.manager.config.multicast.is_shm { - crate::shm::map_zmsg_to_shminfo(&mut msg) - } else { - crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.state.multicast.shm.reader) - }; - if let Err(e) = res { + if let Err(e) = map_zmsg_to_partner(&mut msg, &self.shm) { tracing::trace!("Failed SHM conversion: {}", e); return false; } diff --git a/io/zenoh-transport/src/shm.rs b/io/zenoh-transport/src/shm.rs index 09edde884e..6dd65aab16 100644 --- 
a/io/zenoh-transport/src/shm.rs +++ b/io/zenoh-transport/src/shm.rs @@ -11,70 +11,178 @@ // Contributors: // ZettaScale Zenoh Team, // -use tokio::sync::RwLock; +use std::collections::HashSet; use zenoh_buffers::{reader::HasReader, writer::HasWriter, ZBuf, ZSlice, ZSliceKind}; use zenoh_codec::{RCodec, WCodec, Zenoh080}; -use zenoh_core::{zasyncread, zasyncwrite, zerror}; +use zenoh_core::zerror; use zenoh_protocol::{ network::{NetworkBody, NetworkMessage, Push, Request, Response}, zenoh::{ err::Err, ext::ShmType, query::{ext::QueryBodyType, Query}, - reply::ReplyBody, PushBody, Put, Reply, RequestBody, ResponseBody, }, }; use zenoh_result::ZResult; -use zenoh_shm::{SharedMemoryBuf, SharedMemoryBufInfo, SharedMemoryReader}; +use zenoh_shm::{ + api::common::types::ProtocolID, reader::SharedMemoryReader, SharedMemoryBuf, + SharedMemoryBufInfo, +}; + +use crate::unicast::establishment::ext::shm::AuthSegment; + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct TransportShmConfig { + partner_protocols: HashSet, +} + +impl PartnerShmConfig for TransportShmConfig { + fn supports_protocol(&self, protocol: ProtocolID) -> bool { + self.partner_protocols.contains(&protocol) + } +} -// Traits +impl TransportShmConfig { + pub fn new(partner_segment: AuthSegment) -> Self { + Self { + partner_protocols: partner_segment.protocols().iter().cloned().collect(), + } + } +} + +#[derive(Clone)] +pub struct MulticastTransportShmConfig; + +impl PartnerShmConfig for MulticastTransportShmConfig { + fn supports_protocol(&self, _protocol: ProtocolID) -> bool { + true + } +} + +pub fn map_zmsg_to_partner( + msg: &mut NetworkMessage, + partner_shm_cfg: &Option, +) -> ZResult<()> { + match &mut msg.body { + NetworkBody::Push(Push { payload, .. }) => match payload { + PushBody::Put(b) => b.map_to_partner(partner_shm_cfg), + PushBody::Del(_) => Ok(()), + }, + NetworkBody::Request(Request { payload, .. }) => match payload { + RequestBody::Query(b) => b.map_to_partner(partner_shm_cfg), + }, + NetworkBody::Response(Response { payload, .. }) => match payload { + ResponseBody::Reply(b) => b.map_to_partner(partner_shm_cfg), + ResponseBody::Err(b) => b.map_to_partner(partner_shm_cfg), + }, + NetworkBody::ResponseFinal(_) + | NetworkBody::Interest(_) + | NetworkBody::Declare(_) + | NetworkBody::OAM(_) => Ok(()), + } +} + +pub fn map_zmsg_to_shmbuf(msg: &mut NetworkMessage, shmr: &SharedMemoryReader) -> ZResult<()> { + match &mut msg.body { + NetworkBody::Push(Push { payload, .. }) => match payload { + PushBody::Put(b) => b.map_to_shmbuf(shmr), + PushBody::Del(_) => Ok(()), + }, + NetworkBody::Request(Request { payload, .. }) => match payload { + RequestBody::Query(b) => b.map_to_shmbuf(shmr), + }, + NetworkBody::Response(Response { payload, .. 
}) => match payload { + ResponseBody::Err(b) => b.map_to_shmbuf(shmr), + ResponseBody::Reply(b) => b.map_to_shmbuf(shmr), + }, + NetworkBody::ResponseFinal(_) + | NetworkBody::Interest(_) + | NetworkBody::Declare(_) + | NetworkBody::OAM(_) => Ok(()), + } +} + +pub trait PartnerShmConfig { + fn supports_protocol(&self, protocol: ProtocolID) -> bool; +} + +// Currently, there can be three forms of ZSlice: +// rawbuf - usual non-shm buffer +// shminfo - small SHM info that can be used to mount SHM buffer and get access to it's contents +// shmbuf - mounted SHM buffer +// On RX and TX we need to do the following conversion: trait MapShm { - fn map_to_shminfo(&mut self) -> ZResult; - fn map_to_shmbuf(&mut self, shmr: &RwLock) -> ZResult; + // RX: + // - shminfo -> shmbuf + // - rawbuf -> rawbuf (no changes) + fn map_to_shmbuf(&mut self, shmr: &SharedMemoryReader) -> ZResult<()>; + + // TX: + // - shmbuf -> shminfo if partner supports shmbuf's SHM protocol + // - shmbuf -> rawbuf if partner does not support shmbuf's SHM protocol + // - rawbuf -> rawbuf (no changes) + fn map_to_partner( + &mut self, + partner_shm_cfg: &Option, + ) -> ZResult<()>; } -macro_rules! map_to_shminfo { - ($zbuf:expr, $ext_shm:expr) => {{ - let res = map_zbuf_to_shminfo($zbuf)?; - if res { - *$ext_shm = Some(ShmType::new()); +macro_rules! map_to_partner { + ($zbuf:expr, $ext_shm:expr, $partner_shm_cfg:expr) => {{ + match $partner_shm_cfg { + Some(shm_cfg) => { + let res = to_shm_partner($zbuf, shm_cfg)?; + if res { + *$ext_shm = Some(ShmType::new()); + } + } + None => { + to_non_shm_partner($zbuf); + } } - Ok(res) + + Ok(()) }}; } -macro_rules! map_to_shmbuf { +macro_rules! map_zbuf_to_shmbuf { ($zbuf:expr, $ext_shm:expr, $shmr:expr) => {{ if $ext_shm.is_some() { *$ext_shm = None; map_zbuf_to_shmbuf($zbuf, $shmr) } else { - Ok(false) + Ok(()) } }}; } // Impl - Put impl MapShm for Put { - fn map_to_shminfo(&mut self) -> ZResult { + fn map_to_partner( + &mut self, + partner_shm_cfg: &Option, + ) -> ZResult<()> { let Self { payload, ext_shm, .. } = self; - map_to_shminfo!(payload, ext_shm) + map_to_partner!(payload, ext_shm, partner_shm_cfg) } - fn map_to_shmbuf(&mut self, shmr: &RwLock) -> ZResult { + fn map_to_shmbuf(&mut self, shmr: &SharedMemoryReader) -> ZResult<()> { let Self { payload, ext_shm, .. } = self; - map_to_shmbuf!(payload, ext_shm, shmr) + map_zbuf_to_shmbuf!(payload, ext_shm, shmr) } } // Impl - Query impl MapShm for Query { - fn map_to_shminfo(&mut self) -> ZResult { + fn map_to_partner( + &mut self, + partner_shm_cfg: &Option, + ) -> ZResult<()> { if let Self { ext_body: Some(QueryBodyType { payload, ext_shm, .. @@ -82,13 +190,13 @@ impl MapShm for Query { .. } = self { - map_to_shminfo!(payload, ext_shm) + map_to_partner!(payload, ext_shm, partner_shm_cfg) } else { - Ok(false) + Ok(()) } } - fn map_to_shmbuf(&mut self, shmr: &RwLock) -> ZResult { + fn map_to_shmbuf(&mut self, shmr: &SharedMemoryReader) -> ZResult<()> { if let Self { ext_body: Some(QueryBodyType { payload, ext_shm, .. @@ -96,77 +204,75 @@ impl MapShm for Query { .. } = self { - map_to_shmbuf!(payload, ext_shm, shmr) + map_zbuf_to_shmbuf!(payload, ext_shm, shmr) } else { - Ok(false) + Ok(()) } } } // Impl - Reply impl MapShm for Reply { - fn map_to_shminfo(&mut self) -> ZResult { + fn map_to_partner( + &mut self, + partner_shm_cfg: &Option, + ) -> ZResult<()> { match &mut self.payload { - ReplyBody::Put(b) => b.map_to_shminfo(), - _ => Ok(false), + PushBody::Put(put) => { + let Put { + payload, ext_shm, .. 
+ } = put; + map_to_partner!(payload, ext_shm, partner_shm_cfg) + } + PushBody::Del(_) => Ok(()), } } - fn map_to_shmbuf(&mut self, shmr: &RwLock) -> ZResult { + fn map_to_shmbuf(&mut self, shmr: &SharedMemoryReader) -> ZResult<()> { match &mut self.payload { - ReplyBody::Put(b) => b.map_to_shmbuf(shmr), - _ => Ok(false), + PushBody::Put(put) => { + let Put { + payload, ext_shm, .. + } = put; + map_zbuf_to_shmbuf!(payload, ext_shm, shmr) + } + PushBody::Del(_) => Ok(()), } } } // Impl - Err impl MapShm for Err { - fn map_to_shminfo(&mut self) -> ZResult { - Ok(false) - } - - fn map_to_shmbuf(&mut self, _shmr: &RwLock) -> ZResult { - Ok(false) + fn map_to_partner( + &mut self, + partner_shm_cfg: &Option, + ) -> ZResult<()> { + let Self { + payload, ext_shm, .. + } = self; + map_to_partner!(payload, ext_shm, partner_shm_cfg) } -} -// ShmBuf -> ShmInfo -pub fn map_zmsg_to_shminfo(msg: &mut NetworkMessage) -> ZResult { - match &mut msg.body { - NetworkBody::Push(Push { payload, .. }) => match payload { - PushBody::Put(b) => b.map_to_shminfo(), - PushBody::Del(_) => Ok(false), - }, - NetworkBody::Request(Request { payload, .. }) => match payload { - RequestBody::Query(b) => b.map_to_shminfo(), - }, - NetworkBody::Response(Response { payload, .. }) => match payload { - ResponseBody::Reply(b) => b.map_to_shminfo(), - ResponseBody::Err(b) => b.map_to_shminfo(), - }, - NetworkBody::ResponseFinal(_) - | NetworkBody::Interest(_) - | NetworkBody::Declare(_) - | NetworkBody::OAM(_) => Ok(false), + fn map_to_shmbuf(&mut self, shmr: &SharedMemoryReader) -> ZResult<()> { + let Self { + payload, ext_shm, .. + } = self; + map_zbuf_to_shmbuf!(payload, ext_shm, shmr) } } -// Mapping -pub fn map_zbuf_to_shminfo(zbuf: &mut ZBuf) -> ZResult { - let mut res = false; - for zs in zbuf.zslices_mut() { - if let Some(shmb) = zs.downcast_ref::() { - *zs = map_zslice_to_shminfo(shmb)?; - res = true; - } - } - Ok(res) +#[cold] +#[inline(never)] +pub fn shmbuf_to_rawbuf(shmb: &SharedMemoryBuf) -> ZSlice { + // Convert shmb to raw buffer + // TODO: optimize this! We should not make additional buffer copy here, + // but we need to make serializer serialize SHM buffer as raw buffer. + shmb.as_ref().to_vec().into() } #[cold] #[inline(never)] -pub fn map_zslice_to_shminfo(shmb: &SharedMemoryBuf) -> ZResult { +pub fn shmbuf_to_shminfo(shmb: &SharedMemoryBuf) -> ZResult { // Serialize the shmb info let codec = Zenoh080::new(); let mut info = vec![]; @@ -175,73 +281,63 @@ pub fn map_zslice_to_shminfo(shmb: &SharedMemoryBuf) -> ZResult { .write(&mut writer, &shmb.info) .map_err(|e| zerror!("{:?}", e))?; // Increase the reference count so to keep the SharedMemoryBuf valid - shmb.inc_ref_count(); + unsafe { shmb.inc_ref_count() }; // Replace the content of the slice let mut zslice: ZSlice = info.into(); zslice.kind = ZSliceKind::ShmPtr; Ok(zslice) } -// ShmInfo -> ShmBuf -pub fn map_zmsg_to_shmbuf( - msg: &mut NetworkMessage, - shmr: &RwLock, +fn to_shm_partner( + zbuf: &mut ZBuf, + partner_shm_cfg: &ShmCfg, ) -> ZResult { - match &mut msg.body { - NetworkBody::Push(Push { payload, .. }) => match payload { - PushBody::Put(b) => b.map_to_shmbuf(shmr), - PushBody::Del(_) => Ok(false), - }, - NetworkBody::Request(Request { payload, .. }) => match payload { - RequestBody::Query(b) => b.map_to_shmbuf(shmr), - }, - NetworkBody::Response(Response { payload, .. 
}) => match payload { - ResponseBody::Reply(b) => b.map_to_shmbuf(shmr), - ResponseBody::Err(b) => b.map_to_shmbuf(shmr), - }, - NetworkBody::ResponseFinal(_) - | NetworkBody::Interest(_) - | NetworkBody::Declare(_) - | NetworkBody::OAM(_) => Ok(false), + let mut res = false; + for zs in zbuf.zslices_mut() { + if let Some(shmb) = zs.downcast_ref::() { + if partner_shm_cfg.supports_protocol(shmb.info.shm_protocol) { + *zs = shmbuf_to_shminfo(shmb)?; + res = true; + } else { + // Replace the content of the slice with rawbuf + *zs = shmbuf_to_rawbuf(shmb) + } + } } + Ok(res) } -// Mapping -pub fn map_zbuf_to_shmbuf(zbuf: &mut ZBuf, shmr: &RwLock) -> ZResult { - let mut res = false; +fn to_non_shm_partner(zbuf: &mut ZBuf) { + for zs in zbuf.zslices_mut() { + if let Some(shmb) = zs.downcast_ref::() { + // Replace the content of the slice with rawbuf + *zs = shmbuf_to_rawbuf(shmb) + } + } +} + +pub fn map_zbuf_to_shmbuf(zbuf: &mut ZBuf, shmr: &SharedMemoryReader) -> ZResult<()> { for zs in zbuf.zslices_mut().filter(|x| x.kind == ZSliceKind::ShmPtr) { - res |= map_zslice_to_shmbuf(zs, shmr)?; + map_zslice_to_shmbuf(zs, shmr)?; } - Ok(res) + Ok(()) } #[cold] #[inline(never)] -pub fn map_zslice_to_shmbuf( - zslice: &mut ZSlice, - shmr: &RwLock, -) -> ZResult { - // Deserialize the shmb info into shm buff +pub fn map_zslice_to_shmbuf(zslice: &mut ZSlice, shmr: &SharedMemoryReader) -> ZResult<()> { let codec = Zenoh080::new(); let mut reader = zslice.reader(); + // Deserialize the shminfo let shmbinfo: SharedMemoryBufInfo = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; - // First, try in read mode allowing concurrenct lookups - let r_guard = tokio::task::block_in_place(|| { - tokio::runtime::Handle::current().block_on(async { zasyncread!(shmr) }) - }); - let smb = r_guard.try_read_shmbuf(&shmbinfo).or_else(|_| { - drop(r_guard); - let mut w_guard = tokio::task::block_in_place(|| { - tokio::runtime::Handle::current().block_on(async { zasyncwrite!(shmr) }) - }); - w_guard.read_shmbuf(&shmbinfo) - })?; + // Mount shmbuf + let smb = shmr.read_shmbuf(&shmbinfo)?; // Replace the content of the slice let zs: ZSlice = smb.into(); *zslice = zs; - Ok(true) + Ok(()) } diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index d7c64c3034..48638834e0 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs +++ b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -12,7 +12,10 @@ // ZettaScale Zenoh Team, // #[cfg(feature = "shared-memory")] -use crate::unicast::shared_memory_unicast::Challenge; +use super::ext::shm::AuthSegment; +#[cfg(feature = "shared-memory")] +use crate::shm::TransportShmConfig; + use crate::{ common::batch::BatchConfig, unicast::{ @@ -79,7 +82,7 @@ struct RecvInitSynOut { other_zid: ZenohId, other_whatami: WhatAmI, #[cfg(feature = "shared-memory")] - ext_shm: Challenge, + ext_shm: Option, } // InitAck @@ -90,10 +93,12 @@ struct SendInitAckIn { other_zid: ZenohId, other_whatami: WhatAmI, #[cfg(feature = "shared-memory")] - ext_shm: Challenge, + ext_shm: Option, } struct SendInitAckOut { cookie_nonce: u64, + #[cfg(feature = "shared-memory")] + ext_shm: Option, } // OpenSyn @@ -126,7 +131,8 @@ struct AcceptLink<'a> { #[cfg(feature = "transport_multilink")] ext_mlink: ext::multilink::MultiLinkFsm<'a>, #[cfg(feature = "shared-memory")] - ext_shm: ext::shm::ShmFsm<'a>, + // Will be None if SHM operation is disabled by Config + ext_shm: Option>, #[cfg(feature = "transport_auth")] ext_auth: 
ext::auth::AuthFsm<'a>, ext_lowlatency: ext::lowlatency::LowLatencyFsm<'a>, @@ -208,11 +214,13 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { // Extension Shm #[cfg(feature = "shared-memory")] - let ext_shm = self - .ext_shm - .recv_init_syn((&mut state.transport.ext_shm, init_syn.ext_shm)) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; + let ext_shm = match &self.ext_shm { + Some(my_shm) => my_shm + .recv_init_syn(init_syn.ext_shm) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?, + _ => None, + }; // Extension Auth #[cfg(feature = "transport_auth")] @@ -267,14 +275,14 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension Shm - let ext_shm = zcondfeat!( - "shared-memory", - self.ext_shm - .send_init_ack((&mut state.transport.ext_shm, input.ext_shm)) + #[cfg(feature = "shared-memory")] + let ext_shm = match self.ext_shm.as_ref() { + Some(my_shm) => my_shm + .send_init_ack(&input.ext_shm) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, - None - ); + _ => None, + }; // Extension Auth let ext_auth = zcondfeat!( @@ -357,6 +365,7 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { batch_size: state.transport.batch_size, cookie, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -371,7 +380,11 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; - let output = SendInitAckOut { cookie_nonce }; + let output = SendInitAckOut { + cookie_nonce, + #[cfg(feature = "shared-memory")] + ext_shm: input.ext_shm, + }; Ok(output) } @@ -464,10 +477,12 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { // Extension Shm #[cfg(feature = "shared-memory")] - self.ext_shm - .recv_open_syn((&mut state.transport.ext_shm, open_syn.ext_shm)) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; + if let Some(my_shm) = self.ext_shm.as_ref() { + my_shm + .recv_open_syn((&mut state.transport.ext_shm, open_syn.ext_shm)) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; + } // Extension Auth #[cfg(feature = "transport_auth")] @@ -528,14 +543,14 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension Shm - let ext_shm = zcondfeat!( - "shared-memory", - self.ext_shm - .send_open_ack(&mut state.transport.ext_shm) + #[cfg(feature = "shared-memory")] + let ext_shm = match self.ext_shm.as_ref() { + Some(my_shm) => my_shm + .send_open_ack(&state.transport.ext_shm) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, - None - ); + None => None, + }; // Extension Auth let ext_auth = zcondfeat!( @@ -574,6 +589,7 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { lease: input.mine_lease, initial_sn: mine_initial_sn, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -607,7 +623,12 @@ pub(crate) async fn accept_link(link: LinkUnicast, manager: &TransportManager) - cipher: &manager.cipher, ext_qos: ext::qos::QoSFsm::new(), #[cfg(feature = "shared-memory")] - ext_shm: ext::shm::ShmFsm::new(&manager.state.unicast.shm), + ext_shm: manager + .state + .unicast + .auth_shm + .as_ref() + .map(ext::shm::ShmFsm::new), #[cfg(feature = "transport_multilink")] ext_mlink: manager.state.unicast.multilink.fsm(&manager.prng), #[cfg(feature = "transport_auth")] @@ -644,7 +665,7 @@ pub(crate) async fn accept_link(link: LinkUnicast, manager: &TransportManager) - .multilink .accept(manager.config.unicast.max_links > 1), 
#[cfg(feature = "shared-memory")] - ext_shm: ext::shm::StateAccept::new(manager.config.unicast.is_shm), + ext_shm: ext::shm::StateAccept::new(), ext_lowlatency: ext::lowlatency::StateAccept::new( manager.config.unicast.is_lowlatency, ), @@ -708,7 +729,10 @@ pub(crate) async fn accept_link(link: LinkUnicast, manager: &TransportManager) - #[cfg(feature = "transport_multilink")] multilink: state.transport.ext_mlink.multilink(), #[cfg(feature = "shared-memory")] - is_shm: state.transport.ext_shm.is_shm(), + shm: match state.transport.ext_shm.negotiated_to_use_shm() { + true => iack_out.ext_shm.map(TransportShmConfig::new), + false => None, + }, is_lowlatency: state.transport.ext_lowlatency.is_lowlatency(), }; diff --git a/io/zenoh-transport/src/unicast/establishment/ext/shm.rs b/io/zenoh-transport/src/unicast/establishment/ext/shm.rs index 2aec0cf508..1287095a51 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/shm.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/shm.rs @@ -11,31 +11,108 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::unicast::{ - establishment::{AcceptFsm, OpenFsm}, - shared_memory_unicast::{Challenge, SharedMemoryUnicast}, -}; +use crate::unicast::establishment::{AcceptFsm, OpenFsm}; use async_trait::async_trait; -use std::convert::TryInto; +use rand::{Rng, SeedableRng}; +use std::ops::Deref; use zenoh_buffers::{ reader::{DidntRead, HasReader, Reader}, writer::{DidntWrite, HasWriter, Writer}, }; use zenoh_codec::{RCodec, WCodec, Zenoh080}; -use zenoh_core::zasyncwrite; +use zenoh_core::bail; +use zenoh_crypto::PseudoRng; use zenoh_protocol::transport::{init, open}; -use zenoh_result::{zerror, Error as ZError}; -use zenoh_shm::SharedMemoryBufInfo; +use zenoh_result::{zerror, Error as ZError, ZResult}; +use zenoh_shm::{api::common::types::ProtocolID, posix_shm::array::ArrayInSHM}; + +/*************************************/ +/* Segment */ +/*************************************/ +const AUTH_SEGMENT_PREFIX: &str = "auth"; + +pub(crate) type AuthSegmentID = u32; +pub(crate) type AuthChallenge = u64; + +#[derive(Debug)] +pub struct AuthSegment { + array: ArrayInSHM, +} + +impl AuthSegment { + pub fn create(challenge: AuthChallenge, shm_protocols: &[ProtocolID]) -> ZResult { + let array = ArrayInSHM::::create( + 1 + shm_protocols.len(), + AUTH_SEGMENT_PREFIX, + )?; + unsafe { + (*array.elem_mut(0)) = challenge; + for elem in 1..array.elem_count() { + (*array.elem_mut(elem)) = shm_protocols[elem - 1] as u64; + } + }; + Ok(Self { array }) + } + + pub fn open(id: AuthSegmentID) -> ZResult { + let array = ArrayInSHM::open(id, AUTH_SEGMENT_PREFIX)?; + Ok(Self { array }) + } + + pub fn challenge(&self) -> AuthChallenge { + unsafe { *self.array.elem(0) } + } + + pub fn protocols(&self) -> Vec { + let mut result = vec![]; + for elem in 1..self.array.elem_count() { + result.push(unsafe { *self.array.elem(elem) as u32 }); + } + result + } + + pub fn id(&self) -> AuthSegmentID { + self.array.id() + } +} + +/*************************************/ +/* Authenticator */ +/*************************************/ +pub(crate) struct AuthUnicast { + segment: AuthSegment, +} + +impl Deref for AuthUnicast { + type Target = AuthSegment; + + fn deref(&self) -> &Self::Target { + &self.segment + } +} + +impl AuthUnicast { + pub fn new(shm_protocols: &[ProtocolID]) -> ZResult { + // Create a challenge for session establishment + let mut prng = PseudoRng::from_entropy(); + let nonce = prng.gen(); + + // allocate SHM segment with challenge + let segment = 
AuthSegment::create(nonce, shm_protocols)?; + + Ok(Self { segment }) + } +} /*************************************/ /* InitSyn */ /*************************************/ /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ -/// ~ ShmMemBufInfo ~ +/// ~ Segment id ~ /// +---------------+ pub(crate) struct InitSyn { - pub(crate) alice_info: SharedMemoryBufInfo, + pub(crate) alice_segment: AuthSegmentID, } // Codec @@ -46,7 +123,7 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &InitSyn) -> Self::Output { - self.write(&mut *writer, &x.alice_info)?; + self.write(&mut *writer, &x.alice_segment)?; Ok(()) } } @@ -58,8 +135,8 @@ where type Error = DidntRead; fn read(self, reader: &mut R) -> Result { - let alice_info: SharedMemoryBufInfo = self.read(&mut *reader)?; - Ok(InitSyn { alice_info }) + let alice_segment = self.read(&mut *reader)?; + Ok(InitSyn { alice_segment }) } } @@ -70,11 +147,11 @@ where /// +-+-+-+-+-+-+-+-+ /// ~ challenge ~ /// +---------------+ -/// ~ ShmMemBufInfo ~ +/// ~ Segment id ~ /// +---------------+ struct InitAck { alice_challenge: u64, - bob_info: SharedMemoryBufInfo, + bob_segment: AuthSegmentID, } impl WCodec<&InitAck, &mut W> for Zenoh080 @@ -85,7 +162,7 @@ where fn write(self, writer: &mut W, x: &InitAck) -> Self::Output { self.write(&mut *writer, x.alice_challenge)?; - self.write(&mut *writer, &x.bob_info)?; + self.write(&mut *writer, &x.bob_segment)?; Ok(()) } } @@ -98,10 +175,10 @@ where fn read(self, reader: &mut R) -> Result { let alice_challenge: u64 = self.read(&mut *reader)?; - let bob_info: SharedMemoryBufInfo = self.read(&mut *reader)?; + let bob_segment = self.read(&mut *reader)?; Ok(InitAck { alice_challenge, - bob_info, + bob_segment, }) } } @@ -124,11 +201,11 @@ where // Extension Fsm pub(crate) struct ShmFsm<'a> { - inner: &'a SharedMemoryUnicast, + inner: &'a AuthUnicast, } impl<'a> ShmFsm<'a> { - pub(crate) const fn new(inner: &'a SharedMemoryUnicast) -> Self { + pub(crate) const fn new(inner: &'a AuthUnicast) -> Self { Self { inner } } } @@ -136,18 +213,29 @@ impl<'a> ShmFsm<'a> { /*************************************/ /* OPEN */ /*************************************/ -#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct StateOpen { - is_shm: bool, + // false by default, will be switched to true at the end of open_ack + negotiated_to_use_shm: bool, } impl StateOpen { - pub(crate) const fn new(is_shm: bool) -> Self { - Self { is_shm } + pub(crate) const fn new() -> Self { + Self { + negotiated_to_use_shm: false, + } } - pub(crate) const fn is_shm(&self) -> bool { - self.is_shm + pub(crate) const fn negotiated_to_use_shm(&self) -> bool { + self.negotiated_to_use_shm + } + + #[cfg(test)] + pub(crate) fn rand() -> Self { + let mut rng = rand::thread_rng(); + Self { + negotiated_to_use_shm: rng.gen_bool(0.5), + } } } @@ -159,16 +247,12 @@ impl<'a> OpenFsm for &'a ShmFsm<'a> { type SendInitSynOut = Option; async fn send_init_syn( self, - state: Self::SendInitSynIn, + _state: Self::SendInitSynIn, ) -> Result { const S: &str = "Shm extension - Send InitSyn."; - if !state.is_shm() { - return Ok(None); - } - let init_syn = InitSyn { - alice_info: self.inner.challenge.info.clone(), + alice_segment: self.inner.id(), }; let codec = Zenoh080::new(); @@ -181,22 +265,16 @@ impl<'a> OpenFsm for &'a ShmFsm<'a> { Ok(Some(init::ext::Shm::new(buff.into()))) } - type RecvInitAckIn = (&'a mut StateOpen, Option); - type RecvInitAckOut = Challenge; + type RecvInitAckIn = Option; + type 
RecvInitAckOut = Option; async fn recv_init_ack( self, - input: Self::RecvInitAckIn, + mut input: Self::RecvInitAckIn, ) -> Result { const S: &str = "Shm extension - Recv InitAck."; - let (state, mut ext) = input; - if !state.is_shm() { - return Ok(0); - } - - let Some(ext) = ext.take() else { - state.is_shm = false; - return Ok(0); + let Some(ext) = input.take() else { + return Ok(None); }; // Decode the extension @@ -204,18 +282,11 @@ impl<'a> OpenFsm for &'a ShmFsm<'a> { let mut reader = ext.value.reader(); let Ok(init_ack): Result = codec.read(&mut reader) else { tracing::trace!("{} Decoding error.", S); - state.is_shm = false; - return Ok(0); + return Ok(None); }; // Alice challenge as seen by Alice - let bytes: [u8; std::mem::size_of::()] = self - .inner - .challenge - .as_slice() - .try_into() - .map_err(|e| zerror!("{}", e))?; - let challenge = u64::from_le_bytes(bytes); + let challenge = self.inner.challenge(); // Verify that Bob has correctly read Alice challenge if challenge != init_ack.alice_challenge { @@ -225,35 +296,22 @@ impl<'a> OpenFsm for &'a ShmFsm<'a> { init_ack.alice_challenge, challenge ); - state.is_shm = false; - return Ok(0); + return Ok(None); } - // Read Bob's SharedMemoryBuf - let shm_buff = match zasyncwrite!(self.inner.reader).read_shmbuf(&init_ack.bob_info) { + // Read Bob's SHM Segment + let bob_segment = match AuthSegment::open(init_ack.bob_segment) { Ok(buff) => buff, Err(e) => { tracing::trace!("{} {}", S, e); - state.is_shm = false; - return Ok(0); - } - }; - - // Bob challenge as seen by Alice - let bytes: [u8; std::mem::size_of::()] = match shm_buff.as_slice().try_into() { - Ok(bytes) => bytes, - Err(_) => { - tracing::trace!("{} Failed to read remote Shm.", S); - state.is_shm = false; - return Ok(0); + return Ok(None); } }; - let bob_challenge = u64::from_le_bytes(bytes); - Ok(bob_challenge) + Ok(Some(bob_segment)) } - type SendOpenSynIn = (&'a StateOpen, Self::RecvInitAckOut); + type SendOpenSynIn = &'a Self::RecvInitAckOut; type SendOpenSynOut = Option; async fn send_open_syn( self, @@ -261,12 +319,9 @@ impl<'a> OpenFsm for &'a ShmFsm<'a> { ) -> Result { // const S: &str = "Shm extension - Send OpenSyn."; - let (state, bob_challenge) = input; - if !state.is_shm() { - return Ok(None); - } - - Ok(Some(open::ext::Shm::new(bob_challenge))) + Ok(input + .as_ref() + .map(|val| open::ext::Shm::new(val.challenge()))) } type RecvOpenAckIn = (&'a mut StateOpen, Option); @@ -278,22 +333,17 @@ impl<'a> OpenFsm for &'a ShmFsm<'a> { const S: &str = "Shm extension - Recv OpenAck."; let (state, mut ext) = input; - if !state.is_shm() { - return Ok(()); - } let Some(ext) = ext.take() else { - state.is_shm = false; return Ok(()); }; if ext.value != 1 { tracing::trace!("{} Invalid value.", S); - state.is_shm = false; return Ok(()); } - state.is_shm = true; + state.negotiated_to_use_shm = true; Ok(()) } } @@ -302,27 +352,7 @@ impl<'a> OpenFsm for &'a ShmFsm<'a> { /* ACCEPT */ /*************************************/ -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub(crate) struct StateAccept { - is_shm: bool, -} - -impl StateAccept { - pub(crate) const fn new(is_shm: bool) -> Self { - Self { is_shm } - } - - pub(crate) const fn is_shm(&self) -> bool { - self.is_shm - } - - #[cfg(test)] - pub(crate) fn rand() -> Self { - use rand::Rng; - let mut rng = rand::thread_rng(); - Self::new(rng.gen_bool(0.5)) - } -} +pub(crate) type StateAccept = StateOpen; // Codec impl WCodec<&StateAccept, &mut W> for Zenoh080 @@ -332,8 +362,8 @@ where type Output = Result<(), DidntWrite>; fn 
write(self, writer: &mut W, x: &StateAccept) -> Self::Output { - let is_shm = u8::from(x.is_shm); - self.write(&mut *writer, is_shm)?; + let negotiated_to_use_shm = u8::from(x.negotiated_to_use_shm); + self.write(&mut *writer, negotiated_to_use_shm)?; Ok(()) } } @@ -345,9 +375,11 @@ where type Error = DidntRead; fn read(self, reader: &mut R) -> Result { - let is_shm: u8 = self.read(&mut *reader)?; - let is_shm = is_shm == 1; - Ok(StateAccept { is_shm }) + let negotiated_to_use_shm: u8 = self.read(&mut *reader)?; + let negotiated_to_use_shm: bool = negotiated_to_use_shm == 1; + Ok(StateAccept { + negotiated_to_use_shm, + }) } } @@ -355,22 +387,16 @@ where impl<'a> AcceptFsm for &'a ShmFsm<'a> { type Error = ZError; - type RecvInitSynIn = (&'a mut StateAccept, Option); - type RecvInitSynOut = Challenge; + type RecvInitSynIn = Option; + type RecvInitSynOut = Option; async fn recv_init_syn( self, input: Self::RecvInitSynIn, ) -> Result { const S: &str = "Shm extension - Recv InitSyn."; - let (state, mut ext) = input; - if !state.is_shm() { - return Ok(0); - } - - let Some(ext) = ext.take() else { - state.is_shm = false; - return Ok(0); + let Some(ext) = input.as_ref() else { + return Ok(None); }; // Decode the extension @@ -378,35 +404,16 @@ impl<'a> AcceptFsm for &'a ShmFsm<'a> { let mut reader = ext.value.reader(); let Ok(init_syn): Result = codec.read(&mut reader) else { tracing::trace!("{} Decoding error.", S); - state.is_shm = false; - return Ok(0); + bail!(""); }; - // Read Alice's SharedMemoryBuf - let shm_buff = match zasyncwrite!(self.inner.reader).read_shmbuf(&init_syn.alice_info) { - Ok(buff) => buff, - Err(e) => { - tracing::trace!("{} {}", S, e); - state.is_shm = false; - return Ok(0); - } - }; + // Read Alice's SHM Segment + let alice_segment = AuthSegment::open(init_syn.alice_segment)?; - // Alice challenge as seen by Bob - let bytes: [u8; std::mem::size_of::()] = match shm_buff.as_slice().try_into() { - Ok(bytes) => bytes, - Err(_) => { - tracing::trace!("{} Failed to read remote Shm.", S); - state.is_shm = false; - return Ok(0); - } - }; - let alice_challenge = u64::from_le_bytes(bytes); - - Ok(alice_challenge) + Ok(Some(alice_segment)) } - type SendInitAckIn = (&'a StateAccept, Self::RecvInitSynOut); + type SendInitAckIn = &'a Self::RecvInitSynOut; type SendInitAckOut = Option; async fn send_init_ack( self, @@ -414,14 +421,13 @@ impl<'a> AcceptFsm for &'a ShmFsm<'a> { ) -> Result { const S: &str = "Shm extension - Send InitAck."; - let (state, alice_challenge) = input; - if !state.is_shm() { + let Some(alice_segment) = input.as_ref() else { return Ok(None); - } + }; let init_syn = InitAck { - alice_challenge, - bob_info: self.inner.challenge.info.clone(), + alice_challenge: alice_segment.challenge(), + bob_segment: self.inner.id(), }; let codec = Zenoh080::new(); @@ -443,23 +449,13 @@ impl<'a> AcceptFsm for &'a ShmFsm<'a> { const S: &str = "Shm extension - Recv OpenSyn."; let (state, mut ext) = input; - if !state.is_shm() { - return Ok(()); - } let Some(ext) = ext.take() else { - state.is_shm = false; return Ok(()); }; // Bob challenge as seen by Bob - let bytes: [u8; std::mem::size_of::()] = self - .inner - .challenge - .as_slice() - .try_into() - .map_err(|e| zerror!("{}", e))?; - let challenge = u64::from_le_bytes(bytes); + let challenge = self.inner.challenge(); // Verify that Alice has correctly read Bob challenge let bob_challnge = ext.value; @@ -470,26 +466,25 @@ impl<'a> AcceptFsm for &'a ShmFsm<'a> { bob_challnge, challenge ); - state.is_shm = false; return Ok(()); } 
+ state.negotiated_to_use_shm = true; + Ok(()) } - type SendOpenAckIn = &'a mut StateAccept; + type SendOpenAckIn = &'a StateAccept; type SendOpenAckOut = Option; async fn send_open_ack( self, - state: Self::SendOpenAckIn, + input: Self::SendOpenAckIn, ) -> Result { // const S: &str = "Shm extension - Send OpenAck."; - if !state.is_shm() { - return Ok(None); - } - - state.is_shm = true; - Ok(Some(open::ext::Shm::new(1))) + Ok(match input.negotiated_to_use_shm { + true => Some(open::ext::Shm::new(1)), + false => None, + }) } } diff --git a/io/zenoh-transport/src/unicast/establishment/open.rs b/io/zenoh-transport/src/unicast/establishment/open.rs index bb5db2336e..40aa959d10 100644 --- a/io/zenoh-transport/src/unicast/establishment/open.rs +++ b/io/zenoh-transport/src/unicast/establishment/open.rs @@ -12,7 +12,9 @@ // ZettaScale Zenoh Team, // #[cfg(feature = "shared-memory")] -use crate::unicast::shared_memory_unicast::Challenge; +use super::ext::shm::AuthSegment; +#[cfg(feature = "shared-memory")] +use crate::shm::TransportShmConfig; use crate::{ common::batch::BatchConfig, unicast::{ @@ -81,7 +83,7 @@ struct RecvInitAckOut { other_whatami: WhatAmI, other_cookie: ZSlice, #[cfg(feature = "shared-memory")] - ext_shm: Challenge, + ext_shm: Option, } // OpenSyn @@ -91,11 +93,13 @@ struct SendOpenSynIn { other_zid: ZenohId, other_cookie: ZSlice, #[cfg(feature = "shared-memory")] - ext_shm: Challenge, + ext_shm: Option, } struct SendOpenSynOut { mine_initial_sn: TransportSn, + #[cfg(feature = "shared-memory")] + ext_shm: Option, } // OpenAck @@ -110,7 +114,7 @@ struct OpenLink<'a> { #[cfg(feature = "transport_multilink")] ext_mlink: ext::multilink::MultiLinkFsm<'a>, #[cfg(feature = "shared-memory")] - ext_shm: ext::shm::ShmFsm<'a>, + ext_shm: Option>, #[cfg(feature = "transport_auth")] ext_auth: ext::auth::AuthFsm<'a>, ext_lowlatency: ext::lowlatency::LowLatencyFsm<'a>, @@ -138,14 +142,14 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension Shm - let ext_shm = zcondfeat!( - "shared-memory", - self.ext_shm + #[cfg(feature = "shared-memory")] + let ext_shm = match self.ext_shm.as_ref() { + Some(ext) => ext .send_init_syn(&state.transport.ext_shm) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, - None - ); + None => None, + }; // Extension Auth let ext_auth = zcondfeat!( @@ -191,6 +195,7 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { batch_size: state.transport.batch_size, resolution: state.transport.resolution, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -295,11 +300,13 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { // Extension Shm #[cfg(feature = "shared-memory")] - let shm_challenge = self - .ext_shm - .recv_init_ack((&mut state.transport.ext_shm, init_ack.ext_shm)) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; + let shm_segment = match self.ext_shm.as_ref() { + Some(ext) => ext + .recv_init_ack(init_ack.ext_shm) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?, + None => None, + }; // Extension Auth #[cfg(feature = "transport_auth")] @@ -333,7 +340,7 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { other_whatami: init_ack.whatami, other_cookie: init_ack.cookie, #[cfg(feature = "shared-memory")] - ext_shm: shm_challenge, + ext_shm: shm_segment, }; Ok(output) } @@ -354,14 +361,14 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension Shm - let ext_shm = zcondfeat!( - 
"shared-memory", - self.ext_shm - .send_open_syn((&state.transport.ext_shm, input.ext_shm)) + #[cfg(feature = "shared-memory")] + let ext_shm = match self.ext_shm.as_ref() { + Some(ext_shm) => ext_shm + .send_open_syn(&input.ext_shm) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, - None - ); + None => None, + }; // Extension Auth let ext_auth = zcondfeat!( @@ -408,6 +415,7 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { initial_sn: mine_initial_sn, cookie: input.other_cookie, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -421,7 +429,11 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; - let output = SendOpenSynOut { mine_initial_sn }; + let output = SendOpenSynOut { + mine_initial_sn, + #[cfg(feature = "shared-memory")] + ext_shm: input.ext_shm, + }; Ok(output) } @@ -471,10 +483,11 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { // Extension Shm #[cfg(feature = "shared-memory")] - self.ext_shm - .recv_open_ack((&mut state.transport.ext_shm, open_ack.ext_shm)) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; + if let Some(ext) = self.ext_shm.as_ref() { + ext.recv_open_ack((&mut state.transport.ext_shm, open_ack.ext_shm)) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))? + }; // Extension Auth #[cfg(feature = "transport_auth")] @@ -531,7 +544,12 @@ pub(crate) async fn open_link( #[cfg(feature = "transport_multilink")] ext_mlink: manager.state.unicast.multilink.fsm(&manager.prng), #[cfg(feature = "shared-memory")] - ext_shm: ext::shm::ShmFsm::new(&manager.state.unicast.shm), + ext_shm: manager + .state + .unicast + .auth_shm + .as_ref() + .map(ext::shm::ShmFsm::new), #[cfg(feature = "transport_auth")] ext_auth: manager.state.unicast.authenticator.fsm(&manager.prng), ext_lowlatency: ext::lowlatency::LowLatencyFsm::new(), @@ -555,7 +573,7 @@ pub(crate) async fn open_link( .multilink .open(manager.config.unicast.max_links > 1), #[cfg(feature = "shared-memory")] - ext_shm: ext::shm::StateOpen::new(manager.config.unicast.is_shm), + ext_shm: ext::shm::StateOpen::new(), ext_lowlatency: ext::lowlatency::StateOpen::new(manager.config.unicast.is_lowlatency), }, @@ -619,7 +637,10 @@ pub(crate) async fn open_link( #[cfg(feature = "transport_multilink")] multilink: state.transport.ext_mlink.multilink(), #[cfg(feature = "shared-memory")] - is_shm: state.transport.ext_shm.is_shm(), + shm: match state.transport.ext_shm.negotiated_to_use_shm() { + true => osyn_out.ext_shm.map(TransportShmConfig::new), + false => None, + }, is_lowlatency: state.transport.ext_lowlatency.is_lowlatency(), }; diff --git a/io/zenoh-transport/src/unicast/link.rs b/io/zenoh-transport/src/unicast/link.rs index e03201e0c5..1c9c190aae 100644 --- a/io/zenoh-transport/src/unicast/link.rs +++ b/io/zenoh-transport/src/unicast/link.rs @@ -207,7 +207,7 @@ impl TransportLinkUnicastRx { pub async fn recv_batch(&mut self, buff: C) -> ZResult where C: Fn() -> T + Copy, - T: ZSliceBuffer + 'static, + T: AsMut<[u8]> + ZSliceBuffer + 'static, { const ERR: &str = "Read error from link: "; @@ -220,14 +220,14 @@ impl TransportLinkUnicastRx { // Read the bytes let slice = into - .as_mut_slice() + .as_mut() .get_mut(len.len()..len.len() + l) .ok_or_else(|| zerror!("{ERR}{self}. Invalid batch length or buffer size."))?; self.link.read_exact(slice).await?; len.len() + l } else { // Read the bytes - self.link.read(into.as_mut_slice()).await? + self.link.read(into.as_mut()).await? 
}; // tracing::trace!("RBytes: {:02x?}", &into.as_slice()[0..end]); diff --git a/io/zenoh-transport/src/unicast/lowlatency/rx.rs b/io/zenoh-transport/src/unicast/lowlatency/rx.rs index 4be94cc1a0..de0b62354f 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/rx.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/rx.rs @@ -35,8 +35,8 @@ impl TransportUnicastLowlatency { if let Some(callback) = callback.as_ref() { #[cfg(feature = "shared-memory")] { - if self.config.is_shm { - crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shm().reader)?; + if self.config.shm.is_some() { + crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shmr)?; } } callback.handle_message(msg) diff --git a/io/zenoh-transport/src/unicast/lowlatency/transport.rs b/io/zenoh-transport/src/unicast/lowlatency/transport.rs index dcc9fc8476..726d21bb84 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/transport.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/transport.rs @@ -193,7 +193,7 @@ impl TransportUnicastTrait for TransportUnicastLowlatency { #[cfg(feature = "shared-memory")] fn is_shm(&self) -> bool { - self.config.is_shm + self.config.shm.is_some() } fn is_qos(&self) -> bool { diff --git a/io/zenoh-transport/src/unicast/lowlatency/tx.rs b/io/zenoh-transport/src/unicast/lowlatency/tx.rs index 38751eb61d..d573544340 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/tx.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/tx.rs @@ -20,6 +20,9 @@ use zenoh_protocol::{ use zenoh_result::bail; use zenoh_result::ZResult; +#[cfg(feature = "shared-memory")] +use crate::shm::map_zmsg_to_partner; + impl TransportUnicastLowlatency { #[allow(unused_mut)] // When feature "shared-memory" is not enabled #[allow(clippy::let_and_return)] // When feature "stats" is not enabled @@ -27,12 +30,7 @@ impl TransportUnicastLowlatency { pub(crate) fn internal_schedule(&self, mut msg: NetworkMessage) -> ZResult<()> { #[cfg(feature = "shared-memory")] { - let res = if self.config.is_shm { - crate::shm::map_zmsg_to_shminfo(&mut msg) - } else { - crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shm().reader) - }; - if let Err(e) = res { + if let Err(e) = map_zmsg_to_partner(&mut msg, &self.config.shm) { bail!("Failed SHM conversion: {}", e); } } diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index 93e9d4da80..6844f30163 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // #[cfg(feature = "shared-memory")] -use super::shared_memory_unicast::SharedMemoryUnicast; +use super::establishment::ext::shm::AuthUnicast; use super::{link::LinkUnicastWithOpenAck, transport_unicast_inner::InitTransportResult}; #[cfg(feature = "transport_auth")] use crate::unicast::establishment::ext::auth::Auth; @@ -49,6 +49,8 @@ use zenoh_protocol::{ transport::{close, TransportSn}, }; use zenoh_result::{bail, zerror, ZResult}; +#[cfg(feature = "shared-memory")] +use zenoh_shm::reader::SharedMemoryReader; /*************************************/ /* TRANSPORT CONFIG */ @@ -82,9 +84,10 @@ pub struct TransportManagerStateUnicast { // Active authenticators #[cfg(feature = "transport_auth")] pub(super) authenticator: Arc, - // Shared memory + // SHM probing + // Option will be None if SHM is disabled by Config #[cfg(feature = "shared-memory")] - pub(super) shm: Arc, + pub(super) auth_shm: Option, } pub struct TransportManagerParamsUnicast { @@ -211,6 +214,7 @@ impl TransportManagerBuilderUnicast { pub fn 
build( self, #[allow(unused)] prng: &mut PseudoRng, // Required for #[cfg(feature = "transport_multilink")] + #[cfg(feature = "shared-memory")] shm_reader: &SharedMemoryReader, ) -> ZResult { if self.is_qos && self.is_lowlatency { bail!("'qos' and 'lowlatency' options are incompatible"); @@ -238,10 +242,15 @@ impl TransportManagerBuilderUnicast { transports: Arc::new(AsyncMutex::new(HashMap::new())), #[cfg(feature = "transport_multilink")] multilink: Arc::new(MultiLink::make(prng)?), - #[cfg(feature = "shared-memory")] - shm: Arc::new(SharedMemoryUnicast::make()?), #[cfg(feature = "transport_auth")] authenticator: Arc::new(self.authenticator), + #[cfg(feature = "shared-memory")] + auth_shm: match self.is_shm { + true => Some(AuthUnicast::new( + shm_reader.supported_protocols().as_slice(), + )?), + false => None, + }, }; let params = TransportManagerParamsUnicast { config, state }; @@ -288,11 +297,6 @@ impl TransportManager { TransportManagerBuilderUnicast::default() } - #[cfg(feature = "shared-memory")] - pub(crate) fn shm(&self) -> &Arc { - &self.state.unicast.shm - } - pub async fn close_unicast(&self) { tracing::trace!("TransportManagerUnicast::clear())"); @@ -590,14 +594,14 @@ impl TransportManager { "shared-memory", { tracing::debug!( - "New transport opened between {} and {} - whatami: {}, sn resolution: {:?}, initial sn: {:?}, qos: {}, shm: {}, multilink: {}, lowlatency: {}", + "New transport opened between {} and {} - whatami: {}, sn resolution: {:?}, initial sn: {:?}, qos: {}, shm: {:?}, multilink: {}, lowlatency: {}", self.config.zid, config.zid, config.whatami, config.sn_resolution, config.tx_initial_sn, config.is_qos, - config.is_shm, + config.shm, is_multilink, config.is_lowlatency ); diff --git a/io/zenoh-transport/src/unicast/mod.rs b/io/zenoh-transport/src/unicast/mod.rs index 55226f287c..630b56aa1b 100644 --- a/io/zenoh-transport/src/unicast/mod.rs +++ b/io/zenoh-transport/src/unicast/mod.rs @@ -22,7 +22,7 @@ pub(crate) mod universal; pub mod test_helpers; #[cfg(feature = "shared-memory")] -pub(crate) mod shared_memory_unicast; +use crate::shm::TransportShmConfig; use self::transport_unicast_inner::TransportUnicastTrait; @@ -54,7 +54,7 @@ pub(crate) struct TransportConfigUnicast { #[cfg(feature = "transport_multilink")] pub(crate) multilink: Option, #[cfg(feature = "shared-memory")] - pub(crate) is_shm: bool, + pub(crate) shm: Option, pub(crate) is_lowlatency: bool, } diff --git a/io/zenoh-transport/src/unicast/shared_memory_unicast.rs b/io/zenoh-transport/src/unicast/shared_memory_unicast.rs deleted file mode 100644 index 881e6886d2..0000000000 --- a/io/zenoh-transport/src/unicast/shared_memory_unicast.rs +++ /dev/null @@ -1,57 +0,0 @@ -// -// Copyright (c) 2022 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
-// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use rand::{Rng, SeedableRng}; -use tokio::sync::RwLock; -use zenoh_core::zerror; -use zenoh_crypto::PseudoRng; -use zenoh_result::ZResult; -use zenoh_shm::{SharedMemoryBuf, SharedMemoryManager, SharedMemoryReader}; - -pub(crate) type Challenge = u64; -const NAME: &str = "zshm"; - -/*************************************/ -/* Authenticator */ -/*************************************/ -pub(crate) struct SharedMemoryUnicast { - // Rust guarantees that fields are dropped in the order of declaration. - // Buffer needs to be dropped before the manager. - pub(crate) challenge: SharedMemoryBuf, - pub(crate) _manager: SharedMemoryManager, - pub(crate) reader: RwLock, -} - -unsafe impl Sync for SharedMemoryUnicast {} - -impl SharedMemoryUnicast { - pub fn make() -> ZResult { - // Create a challenge for session establishment - let mut prng = PseudoRng::from_entropy(); - let nonce = prng.gen::(); - let size = std::mem::size_of::(); - - let mut _manager = SharedMemoryManager::make(format!("{NAME}.{nonce}"), size)?; - - let mut challenge = _manager.alloc(size).map_err(|e| zerror!("{e}"))?; - let slice = unsafe { challenge.as_mut_slice() }; - slice[0..size].copy_from_slice(&nonce.to_le_bytes()); - - let shmauth = SharedMemoryUnicast { - challenge, - _manager, - reader: RwLock::new(SharedMemoryReader::new()), - }; - Ok(shmauth) - } -} diff --git a/io/zenoh-transport/src/unicast/universal/link.rs b/io/zenoh-transport/src/unicast/universal/link.rs index 9a85ee9a46..8d5d703be1 100644 --- a/io/zenoh-transport/src/unicast/universal/link.rs +++ b/io/zenoh-transport/src/unicast/universal/link.rs @@ -236,7 +236,7 @@ async fn rx_task( where T: ZSliceBuffer + 'static, F: Fn() -> T, - RecyclingObject: ZSliceBuffer, + RecyclingObject: AsMut<[u8]> + ZSliceBuffer, { let batch = link .recv_batch(|| pool.try_take().unwrap_or_else(|| pool.alloc())) diff --git a/io/zenoh-transport/src/unicast/universal/rx.rs b/io/zenoh-transport/src/unicast/universal/rx.rs index 83b2884a59..3edf57f507 100644 --- a/io/zenoh-transport/src/unicast/universal/rx.rs +++ b/io/zenoh-transport/src/unicast/universal/rx.rs @@ -42,8 +42,8 @@ impl TransportUnicastUniversal { ) -> ZResult<()> { #[cfg(feature = "shared-memory")] { - if self.config.is_shm { - crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.state.unicast.shm.reader)?; + if self.config.shm.is_some() { + crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shmr)?; } } callback.handle_message(msg) diff --git a/io/zenoh-transport/src/unicast/universal/transport.rs b/io/zenoh-transport/src/unicast/universal/transport.rs index 58acd5c4b2..5f581673e9 100644 --- a/io/zenoh-transport/src/unicast/universal/transport.rs +++ b/io/zenoh-transport/src/unicast/universal/transport.rs @@ -333,7 +333,7 @@ impl TransportUnicastTrait for TransportUnicastUniversal { #[cfg(feature = "shared-memory")] fn is_shm(&self) -> bool { - self.config.is_shm + self.config.shm.is_some() } fn is_qos(&self) -> bool { diff --git a/io/zenoh-transport/src/unicast/universal/tx.rs b/io/zenoh-transport/src/unicast/universal/tx.rs index ffc162c0b4..a381bb4d29 100644 --- a/io/zenoh-transport/src/unicast/universal/tx.rs +++ b/io/zenoh-transport/src/unicast/universal/tx.rs @@ -15,6 +15,9 @@ use super::transport::TransportUnicastUniversal; use zenoh_core::zread; use zenoh_protocol::network::NetworkMessage; +#[cfg(feature = "shared-memory")] +use crate::shm::map_zmsg_to_partner; + impl TransportUnicastUniversal { fn 
schedule_on_link(&self, msg: NetworkMessage) -> bool { macro_rules! zpush { @@ -61,12 +64,7 @@ impl TransportUnicastUniversal { pub(crate) fn internal_schedule(&self, mut msg: NetworkMessage) -> bool { #[cfg(feature = "shared-memory")] { - let res = if self.config.is_shm { - crate::shm::map_zmsg_to_shminfo(&mut msg) - } else { - crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shm().reader) - }; - if let Err(e) = res { + if let Err(e) = map_zmsg_to_partner(&mut msg, &self.config.shm) { tracing::trace!("Failed SHM conversion: {}", e); return false; } diff --git a/io/zenoh-transport/tests/multicast_compression.rs b/io/zenoh-transport/tests/multicast_compression.rs index b07666af9c..d5eb62c961 100644 --- a/io/zenoh-transport/tests/multicast_compression.rs +++ b/io/zenoh-transport/tests/multicast_compression.rs @@ -168,13 +168,19 @@ mod tests { // Open transport -> This should be accepted println!("Opening transport with {endpoint}"); let _ = ztimeout!(peer01_manager.open_transport_multicast(endpoint.clone())).unwrap(); - assert!(!peer01_manager.get_transports_multicast().await.is_empty()); - println!("\t{:?}", peer01_manager.get_transports_multicast().await); + assert!(!ztimeout!(peer01_manager.get_transports_multicast()).is_empty()); + println!( + "\t{:?}", + ztimeout!(peer01_manager.get_transports_multicast()) + ); println!("Opening transport with {endpoint}"); let _ = ztimeout!(peer02_manager.open_transport_multicast(endpoint.clone())).unwrap(); - assert!(!peer02_manager.get_transports_multicast().await.is_empty()); - println!("\t{:?}", peer02_manager.get_transports_multicast().await); + assert!(!ztimeout!(peer02_manager.get_transports_multicast()).is_empty()); + println!( + "\t{:?}", + ztimeout!(peer02_manager.get_transports_multicast()) + ); // Wait to for peer 01 and 02 to join each other ztimeout!(async { @@ -186,10 +192,8 @@ mod tests { tokio::time::sleep(SLEEP_COUNT).await; } }); - let peer01_transport = peer01_manager - .get_transport_multicast(&peer02_id) - .await - .unwrap(); + let peer01_transport = + ztimeout!(peer01_manager.get_transport_multicast(&peer02_id)).unwrap(); println!( "\tPeer01 peers: {:?}", peer01_transport.get_peers().unwrap() @@ -204,10 +208,8 @@ mod tests { tokio::time::sleep(SLEEP_COUNT).await; } }); - let peer02_transport = peer02_manager - .get_transport_multicast(&peer01_id) - .await - .unwrap(); + let peer02_transport = + ztimeout!(peer02_manager.get_transport_multicast(&peer01_id)).unwrap(); println!( "\tPeer02 peers: {:?}", peer02_transport.get_peers().unwrap() @@ -235,7 +237,7 @@ mod tests { // Close the peer01 transport println!("Closing transport with {endpoint}"); ztimeout!(peer01.transport.close()).unwrap(); - assert!(peer01.manager.get_transports_multicast().await.is_empty()); + assert!(ztimeout!(peer01.manager.get_transports_multicast()).is_empty()); ztimeout!(async { while !peer02.transport.get_peers().unwrap().is_empty() { tokio::time::sleep(SLEEP_COUNT).await; @@ -245,7 +247,7 @@ mod tests { // Close the peer02 transport println!("Closing transport with {endpoint}"); ztimeout!(peer02.transport.close()).unwrap(); - assert!(peer02.manager.get_transports_multicast().await.is_empty()); + assert!(ztimeout!(peer02.manager.get_transports_multicast()).is_empty()); // Wait a little bit tokio::time::sleep(SLEEP).await; diff --git a/io/zenoh-transport/tests/multicast_transport.rs b/io/zenoh-transport/tests/multicast_transport.rs index 472af837ea..20ceb49218 100644 --- a/io/zenoh-transport/tests/multicast_transport.rs +++ 
b/io/zenoh-transport/tests/multicast_transport.rs @@ -164,13 +164,19 @@ mod tests { // Open transport -> This should be accepted println!("Opening transport with {endpoint}"); let _ = ztimeout!(peer01_manager.open_transport_multicast(endpoint.clone())).unwrap(); - assert!(!peer01_manager.get_transports_multicast().await.is_empty()); - println!("\t{:?}", peer01_manager.get_transports_multicast().await); + assert!(!ztimeout!(peer01_manager.get_transports_multicast()).is_empty()); + println!( + "\t{:?}", + ztimeout!(peer01_manager.get_transports_multicast()) + ); println!("Opening transport with {endpoint}"); let _ = ztimeout!(peer02_manager.open_transport_multicast(endpoint.clone())).unwrap(); - assert!(!peer02_manager.get_transports_multicast().await.is_empty()); - println!("\t{:?}", peer02_manager.get_transports_multicast().await); + assert!(!ztimeout!(peer02_manager.get_transports_multicast()).is_empty()); + println!( + "\t{:?}", + ztimeout!(peer02_manager.get_transports_multicast()) + ); // Wait to for peer 01 and 02 to join each other ztimeout!(async { @@ -182,10 +188,8 @@ mod tests { tokio::time::sleep(SLEEP_COUNT).await; } }); - let peer01_transport = peer01_manager - .get_transport_multicast(&peer02_id) - .await - .unwrap(); + let peer01_transport = + ztimeout!(peer01_manager.get_transport_multicast(&peer02_id)).unwrap(); println!( "\tPeer01 peers: {:?}", peer01_transport.get_peers().unwrap() @@ -200,10 +204,8 @@ mod tests { tokio::time::sleep(SLEEP_COUNT).await; } }); - let peer02_transport = peer02_manager - .get_transport_multicast(&peer01_id) - .await - .unwrap(); + let peer02_transport = + ztimeout!(peer02_manager.get_transport_multicast(&peer01_id)).unwrap(); println!( "\tPeer02 peers: {:?}", peer02_transport.get_peers().unwrap() @@ -231,7 +233,7 @@ mod tests { // Close the peer01 transport println!("Closing transport with {endpoint}"); ztimeout!(peer01.transport.close()).unwrap(); - assert!(peer01.manager.get_transports_multicast().await.is_empty()); + assert!(ztimeout!(peer01.manager.get_transports_multicast()).is_empty()); ztimeout!(async { while !peer02.transport.get_peers().unwrap().is_empty() { tokio::time::sleep(SLEEP_COUNT).await; @@ -241,7 +243,7 @@ mod tests { // Close the peer02 transport println!("Closing transport with {endpoint}"); ztimeout!(peer02.transport.close()).unwrap(); - assert!(peer02.manager.get_transports_multicast().await.is_empty()); + assert!(ztimeout!(peer02.manager.get_transports_multicast()).is_empty()); // Wait a little bit tokio::time::sleep(SLEEP).await; diff --git a/io/zenoh-transport/tests/unicast_authenticator.rs b/io/zenoh-transport/tests/unicast_authenticator.rs index 1cad7c6a63..abcf011eed 100644 --- a/io/zenoh-transport/tests/unicast_authenticator.rs +++ b/io/zenoh-transport/tests/unicast_authenticator.rs @@ -291,10 +291,7 @@ async fn auth_pubkey(endpoint: &EndPoint, lowlatency_transport: bool) { ]; let router_pri_key = RsaPrivateKey::from_components(n, e, d, primes).unwrap(); let mut auth_pubkey = AuthPubKey::new(router_pub_key.into(), router_pri_key.into()); - auth_pubkey - .add_pubkey(client01_pub_key.into()) - .await - .unwrap(); + ztimeout!(auth_pubkey.add_pubkey(client01_pub_key.into())).unwrap(); let mut auth = Auth::empty(); auth.set_pubkey(Some(auth_pubkey)); let unicast = make_basic_transport_manager_builder( @@ -315,7 +312,7 @@ async fn auth_pubkey(endpoint: &EndPoint, lowlatency_transport: bool) { // Add the locator on the router ztimeout!(router_manager.add_listener(endpoint.clone())).unwrap(); println!("Transport 
Authenticator PubKey [1a2]"); - let locators = router_manager.get_listeners().await; + let locators = ztimeout!(router_manager.get_listeners()); println!("Transport Authenticator PubKey [1a2]: {locators:?}"); assert_eq!(locators.len(), 1); @@ -344,10 +341,10 @@ async fn auth_pubkey(endpoint: &EndPoint, lowlatency_transport: bool) { // Add client02 pubkey to the router let router_auth_handle = router_manager.get_auth_handle_unicast(); - zasyncwrite!(router_auth_handle.get_pubkey().unwrap()) - .add_pubkey(client02_pub_key.into()) - .await - .unwrap(); + ztimeout!( + zasyncwrite!(router_auth_handle.get_pubkey().unwrap()).add_pubkey(client02_pub_key.into()) + ) + .unwrap(); /* [3b] */ // Open a first transport from client02 to the router @@ -435,13 +432,9 @@ async fn auth_usrpwd(endpoint: &EndPoint, lowlatency_transport: bool) { let router_handler = Arc::new(SHRouterAuthenticator::new()); // Create the router transport manager let mut auth_usrpwd_router = AuthUsrPwd::new(None); - auth_usrpwd_router - .add_user(user01.clone().into(), password01.clone().into()) - .await + ztimeout!(auth_usrpwd_router.add_user(user01.clone().into(), password01.clone().into())) .unwrap(); - auth_usrpwd_router - .add_user(user03.clone().into(), password03.clone().into()) - .await + ztimeout!(auth_usrpwd_router.add_user(user03.clone().into(), password03.clone().into())) .unwrap(); let mut auth_router = Auth::empty(); auth_router.set_usrpwd(Some(auth_usrpwd_router)); @@ -520,7 +513,7 @@ async fn auth_usrpwd(endpoint: &EndPoint, lowlatency_transport: bool) { println!("Transport Authenticator UserPassword [1a1]: {res:?}"); assert!(res.is_ok()); println!("Transport Authenticator UserPassword [1a2]"); - let locators = router_manager.get_listeners().await; + let locators = ztimeout!(router_manager.get_listeners()); println!("Transport Authenticator UserPassword [1a2]: {locators:?}"); assert_eq!(locators.len(), 1); diff --git a/io/zenoh-transport/tests/unicast_compression.rs b/io/zenoh-transport/tests/unicast_compression.rs index d052ed9313..6f80e7dd58 100644 --- a/io/zenoh-transport/tests/unicast_compression.rs +++ b/io/zenoh-transport/tests/unicast_compression.rs @@ -215,10 +215,7 @@ mod tests { let _ = ztimeout!(client_manager.open_transport_unicast(e.clone())).unwrap(); } - let client_transport = client_manager - .get_transport_unicast(&router_id) - .await - .unwrap(); + let client_transport = ztimeout!(client_manager.get_transport_unicast(&router_id)).unwrap(); // Return the handlers ( @@ -357,13 +354,12 @@ mod tests { { let c_stats = client_transport.get_stats().unwrap().report(); println!("\tClient: {:?}", c_stats); - let r_stats = router_manager - .get_transport_unicast(&client_manager.config.zid) - .await - .unwrap() - .get_stats() - .map(|s| s.report()) - .unwrap(); + let r_stats = + ztimeout!(router_manager.get_transport_unicast(&client_manager.config.zid)) + .unwrap() + .get_stats() + .map(|s| s.report()) + .unwrap(); println!("\tRouter: {:?}", r_stats); } diff --git a/io/zenoh-transport/tests/unicast_concurrent.rs b/io/zenoh-transport/tests/unicast_concurrent.rs index 54b469d6ec..dc4c0fbd3d 100644 --- a/io/zenoh-transport/tests/unicast_concurrent.rs +++ b/io/zenoh-transport/tests/unicast_concurrent.rs @@ -145,7 +145,7 @@ async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec Adding endpoint {e:?}: {res:?}"); assert!(res.is_ok()); } - let locs = peer01_manager.get_listeners().await; + let locs = ztimeout!(peer01_manager.get_listeners()); println!("[Transport Peer 01b] => Getting endpoints: {c_end01:?} 
{locs:?}"); assert_eq!(c_end01.len(), locs.len()); @@ -173,11 +173,8 @@ async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec Waiting... OK"); // Verify that the transport has been correctly open - assert_eq!(peer01_manager.get_transports_unicast().await.len(), 1); - let s02 = peer01_manager - .get_transport_unicast(&c_zid02) - .await - .unwrap(); + assert_eq!(ztimeout!(peer01_manager.get_transports_unicast()).len(), 1); + let s02 = ztimeout!(peer01_manager.get_transport_unicast(&c_zid02)).unwrap(); assert_eq!( s02.get_links().unwrap().len(), c_end01.len() + c_end02.len() @@ -246,7 +243,7 @@ async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec Adding endpoint {e:?}: {res:?}"); assert!(res.is_ok()); } - let locs = peer02_manager.get_listeners().await; + let locs = ztimeout!(peer02_manager.get_listeners()); println!("[Transport Peer 02b] => Getting endpoints: {c_end02:?} {locs:?}"); assert_eq!(c_end02.len(), locs.len()); @@ -276,13 +273,10 @@ async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec Transports: {:?}", - peer02_manager.get_transports_unicast().await + ztimeout!(peer02_manager.get_transports_unicast()) ); - assert_eq!(peer02_manager.get_transports_unicast().await.len(), 1); - let s01 = peer02_manager - .get_transport_unicast(&c_zid01) - .await - .unwrap(); + assert_eq!(ztimeout!(peer02_manager.get_transports_unicast()).len(), 1); + let s01 = ztimeout!(peer02_manager.get_transport_unicast(&c_zid01)).unwrap(); assert_eq!( s01.get_links().unwrap().len(), c_end01.len() + c_end02.len() diff --git a/io/zenoh-transport/tests/unicast_defragmentation.rs b/io/zenoh-transport/tests/unicast_defragmentation.rs index 0e88f40cde..40a513b874 100644 --- a/io/zenoh-transport/tests/unicast_defragmentation.rs +++ b/io/zenoh-transport/tests/unicast_defragmentation.rs @@ -64,10 +64,7 @@ async fn run(endpoint: &EndPoint, channel: Channel, msg_size: usize) { println!("Opening transport with {endpoint}"); let _ = ztimeout!(client_manager.open_transport_unicast(endpoint.clone())).unwrap(); - let client_transport = client_manager - .get_transport_unicast(&router_id) - .await - .unwrap(); + let client_transport = ztimeout!(client_manager.get_transport_unicast(&router_id)).unwrap(); // Create the message to send let message: NetworkMessage = Push { diff --git a/io/zenoh-transport/tests/unicast_intermittent.rs b/io/zenoh-transport/tests/unicast_intermittent.rs index 5588612c65..14670bf532 100644 --- a/io/zenoh-transport/tests/unicast_intermittent.rs +++ b/io/zenoh-transport/tests/unicast_intermittent.rs @@ -220,7 +220,7 @@ async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) // Add a listener to the router println!("\nTransport Intermittent [1a1]"); let _ = ztimeout!(router_manager.add_listener(endpoint.clone())).unwrap(); - let locators = router_manager.get_listeners().await; + let locators = ztimeout!(router_manager.get_listeners()); println!("Transport Intermittent [1a2]: {locators:?}"); assert_eq!(locators.len(), 1); @@ -228,7 +228,10 @@ async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) // Open a transport from client01 to the router let c_ses1 = ztimeout!(client01_manager.open_transport_unicast(endpoint.clone())).unwrap(); assert_eq!(c_ses1.get_links().unwrap().len(), 1); - assert_eq!(client01_manager.get_transports_unicast().await.len(), 1); + assert_eq!( + ztimeout!(client01_manager.get_transports_unicast()).len(), + 1 + ); assert_eq!(c_ses1.get_zid().unwrap(), router_id); /* [3] */ @@ -244,7 +247,10 @@ async fn 
transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) let c_ses2 = ztimeout!(c_client02_manager.open_transport_unicast(c_endpoint.clone())).unwrap(); assert_eq!(c_ses2.get_links().unwrap().len(), 1); - assert_eq!(c_client02_manager.get_transports_unicast().await.len(), 1); + assert_eq!( + ztimeout!(c_client02_manager.get_transports_unicast()).len(), + 1 + ); assert_eq!(c_ses2.get_zid().unwrap(), c_router_id); tokio::time::sleep(SLEEP).await; @@ -269,7 +275,10 @@ async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) let c_ses3 = ztimeout!(c_client03_manager.open_transport_unicast(c_endpoint.clone())).unwrap(); assert_eq!(c_ses3.get_links().unwrap().len(), 1); - assert_eq!(c_client03_manager.get_transports_unicast().await.len(), 1); + assert_eq!( + ztimeout!(c_client03_manager.get_transports_unicast()).len(), + 1 + ); assert_eq!(c_ses3.get_zid().unwrap(), c_router_id); tokio::time::sleep(SLEEP).await; @@ -361,15 +370,15 @@ async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) /* [5] */ // Close the open transport on the client println!("Transport Intermittent [5a1]"); - for s in client01_manager.get_transports_unicast().await.iter() { + for s in ztimeout!(client01_manager.get_transports_unicast()).iter() { ztimeout!(s.close()).unwrap(); } println!("Transport Intermittent [5a2]"); - for s in client02_manager.get_transports_unicast().await.iter() { + for s in ztimeout!(client02_manager.get_transports_unicast()).iter() { ztimeout!(s.close()).unwrap(); } println!("Transport Intermittent [5a3]"); - for s in client03_manager.get_transports_unicast().await.iter() { + for s in ztimeout!(client03_manager.get_transports_unicast()).iter() { ztimeout!(s.close()).unwrap(); } diff --git a/io/zenoh-transport/tests/unicast_multilink.rs b/io/zenoh-transport/tests/unicast_multilink.rs index 7952d77b10..c06485fd06 100644 --- a/io/zenoh-transport/tests/unicast_multilink.rs +++ b/io/zenoh-transport/tests/unicast_multilink.rs @@ -134,7 +134,7 @@ mod tests { println!("Transport Open Close [1a1]: {res:?}"); assert!(res.is_ok()); println!("Transport Open Close [1a2]"); - let locators = router_manager.get_listeners().await; + let locators = ztimeout!(router_manager.get_listeners()); println!("Transport Open Close [1a2]: {locators:?}"); assert_eq!(locators.len(), 1); @@ -148,7 +148,7 @@ mod tests { assert!(res.is_ok()); let c_ses1 = res.unwrap(); println!("Transport Open Close [1d1]"); - let transports = client01_manager.get_transports_unicast().await; + let transports = ztimeout!(client01_manager.get_transports_unicast()); println!("Transport Open Close [1d2]: {transports:?}"); assert_eq!(transports.len(), 1); assert_eq!(c_ses1.get_zid().unwrap(), router_id); @@ -188,7 +188,7 @@ mod tests { assert!(res.is_ok()); let c_ses2 = res.unwrap(); println!("Transport Open Close [2b1]"); - let transports = client01_manager.get_transports_unicast().await; + let transports = ztimeout!(client01_manager.get_transports_unicast()); println!("Transport Open Close [2b2]: {transports:?}"); assert_eq!(transports.len(), 1); assert_eq!(c_ses2.get_zid().unwrap(), router_id); @@ -224,7 +224,7 @@ mod tests { println!("Transport Open Close [3a2]: {res:?}"); assert!(res.is_err()); println!("Transport Open Close [3b1]"); - let transports = client01_manager.get_transports_unicast().await; + let transports = ztimeout!(client01_manager.get_transports_unicast()); println!("Transport Open Close [3b2]: {transports:?}"); assert_eq!(transports.len(), 1); 
assert_eq!(c_ses1.get_zid().unwrap(), router_id); @@ -254,7 +254,7 @@ mod tests { println!("Transport Open Close [4a2]: {res:?}"); assert!(res.is_ok()); println!("Transport Open Close [4b1]"); - let transports = client01_manager.get_transports_unicast().await; + let transports = ztimeout!(client01_manager.get_transports_unicast()); println!("Transport Open Close [4b2]: {transports:?}"); assert_eq!(transports.len(), 0); @@ -284,7 +284,7 @@ mod tests { assert!(res.is_ok()); let c_ses3 = res.unwrap(); println!("Transport Open Close [5b1]"); - let transports = client01_manager.get_transports_unicast().await; + let transports = ztimeout!(client01_manager.get_transports_unicast()); println!("Transport Open Close [5b2]: {transports:?}"); assert_eq!(transports.len(), 1); assert_eq!(c_ses3.get_zid().unwrap(), router_id); @@ -316,7 +316,7 @@ mod tests { assert!(res.is_ok()); let c_ses4 = res.unwrap(); println!("Transport Open Close [6b1]"); - let transports = client02_manager.get_transports_unicast().await; + let transports = ztimeout!(client02_manager.get_transports_unicast()); println!("Transport Open Close [6b2]: {transports:?}"); assert_eq!(transports.len(), 1); assert_eq!(c_ses4.get_zid().unwrap(), router_id); @@ -332,7 +332,7 @@ mod tests { println!("Transport Open Close [6d2]: {res:?}"); assert!(res.is_err()); println!("Transport Open Close [6e1]"); - let transports = client02_manager.get_transports_unicast().await; + let transports = ztimeout!(client02_manager.get_transports_unicast()); println!("Transport Open Close [6e2]: {transports:?}"); assert_eq!(transports.len(), 1); @@ -340,7 +340,7 @@ mod tests { println!("Transport Open Close [6f1]"); ztimeout!(async { tokio::time::sleep(SLEEP).await; - let transports = router_manager.get_transports_unicast().await; + let transports = ztimeout!(router_manager.get_transports_unicast()); assert_eq!(transports.len(), 2); let s = transports .iter() @@ -358,7 +358,7 @@ mod tests { println!("Transport Open Close [7a2]: {res:?}"); assert!(res.is_err()); println!("Transport Open Close [7b1]"); - let transports = client03_manager.get_transports_unicast().await; + let transports = ztimeout!(client03_manager.get_transports_unicast()); println!("Transport Open Close [7b2]: {transports:?}"); assert_eq!(transports.len(), 0); @@ -373,7 +373,7 @@ mod tests { println!("Transport Open Close [8b2]: {res:?}"); assert!(res.is_ok()); println!("Transport Open Close [8c1]"); - let transports = client01_manager.get_transports_unicast().await; + let transports = ztimeout!(client01_manager.get_transports_unicast()); println!("Transport Open Close [8c2]: {transports:?}"); assert_eq!(transports.len(), 0); @@ -400,7 +400,7 @@ mod tests { assert!(res.is_ok()); let c_ses4 = res.unwrap(); println!("Transport Open Close [9b1]"); - let transports = client02_manager.get_transports_unicast().await; + let transports = ztimeout!(client02_manager.get_transports_unicast()); println!("Transport Open Close [9b2]: {transports:?}"); assert_eq!(transports.len(), 1); println!("Transport Open Close [9c1]"); @@ -434,7 +434,7 @@ mod tests { println!("Transport Open Close [9a2]: {res:?}"); assert!(res.is_ok()); println!("Transport Open Close [9b1]"); - let transports = client02_manager.get_transports_unicast().await; + let transports = ztimeout!(client02_manager.get_transports_unicast()); println!("Transport Open Close [9b2]: {transports:?}"); assert_eq!(transports.len(), 0); diff --git a/io/zenoh-transport/tests/unicast_openclose.rs b/io/zenoh-transport/tests/unicast_openclose.rs index 
ec897b9382..799290aced 100644 --- a/io/zenoh-transport/tests/unicast_openclose.rs +++ b/io/zenoh-transport/tests/unicast_openclose.rs @@ -151,7 +151,7 @@ async fn openclose_transport( println!("Transport Open Close [1a1]: {res:?}"); assert!(res.is_ok()); println!("Transport Open Close [1a2]"); - let locators = router_manager.get_listeners().await; + let locators = ztimeout!(router_manager.get_listeners()); println!("Transport Open Close [1a2]: {locators:?}"); assert_eq!(locators.len(), 1); diff --git a/io/zenoh-transport/tests/unicast_priorities.rs b/io/zenoh-transport/tests/unicast_priorities.rs index dd023b9749..fa7f68a8a9 100644 --- a/io/zenoh-transport/tests/unicast_priorities.rs +++ b/io/zenoh-transport/tests/unicast_priorities.rs @@ -227,10 +227,7 @@ async fn open_transport_unicast( let _ = ztimeout!(client_manager.open_transport_unicast(e.clone())).unwrap(); } - let client_transport = client_manager - .get_transport_unicast(&router_id) - .await - .unwrap(); + let client_transport = ztimeout!(client_manager.get_transport_unicast(&router_id)).unwrap(); // Return the handlers ( diff --git a/io/zenoh-transport/tests/unicast_shm.rs b/io/zenoh-transport/tests/unicast_shm.rs index 44ea43179b..637f9f8a86 100644 --- a/io/zenoh-transport/tests/unicast_shm.rs +++ b/io/zenoh-transport/tests/unicast_shm.rs @@ -13,7 +13,6 @@ // #[cfg(feature = "shared-memory")] mod tests { - use rand::{Rng, SeedableRng}; use std::{ any::Any, convert::TryFrom, @@ -25,7 +24,6 @@ mod tests { }; use zenoh_buffers::buffer::SplitBuffer; use zenoh_core::ztimeout; - use zenoh_crypto::PseudoRng; use zenoh_link::Link; use zenoh_protocol::{ core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohId}, @@ -35,8 +33,19 @@ mod tests { }, zenoh::{PushBody, Put}, }; - use zenoh_result::{zerror, ZResult}; - use zenoh_shm::{SharedMemoryBuf, SharedMemoryManager}; + use zenoh_result::ZResult; + use zenoh_shm::{ + api::{ + protocol_implementations::posix::{ + posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, + protocol_id::POSIX_PROTOCOL_ID, + }, + provider::shared_memory_provider::{ + BlockOn, GarbageCollect, SharedMemoryProviderBuilder, + }, + }, + SharedMemoryBuf, + }; use zenoh_transport::{ multicast::TransportMulticast, unicast::TransportUnicast, TransportEventHandler, TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, @@ -44,7 +53,6 @@ mod tests { const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); - const USLEEP: Duration = Duration::from_micros(100); const MSG_COUNT: usize = 1_000; const MSG_SIZE: usize = 1_024; @@ -152,22 +160,16 @@ mod tests { let peer_shm02 = ZenohId::try_from([2]).unwrap(); let peer_net01 = ZenohId::try_from([3]).unwrap(); - let mut tries = 100; - let mut prng = PseudoRng::from_entropy(); - let mut shm01 = loop { - // Create the SharedMemoryManager - if let Ok(shm01) = SharedMemoryManager::make( - format!("peer_shm01_{}_{}", endpoint.protocol(), prng.gen::()), - 2 * MSG_SIZE, - ) { - break Ok(shm01); - } - tries -= 1; - if tries == 0 { - break Err(zerror!("Unable to create SharedMemoryManager!")); - } - } - .unwrap(); + // create SHM provider + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(2 * MSG_SIZE) + .unwrap() + .res() + .unwrap(); + let shm01 = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); // Create a peer manager with shared-memory authenticator enabled let peer_shm01_handler = Arc::new(SHPeer::new(true)); @@ 
-229,35 +231,28 @@ mod tests { // Retrieve the transports println!("Transport SHM [2a]"); - let peer_shm02_transport = peer_shm01_manager - .get_transport_unicast(&peer_shm02) - .await - .unwrap(); + let peer_shm02_transport = + ztimeout!(peer_shm01_manager.get_transport_unicast(&peer_shm02)).unwrap(); assert!(peer_shm02_transport.is_shm().unwrap()); println!("Transport SHM [2b]"); - let peer_net01_transport = peer_shm01_manager - .get_transport_unicast(&peer_net01) - .await - .unwrap(); + let peer_net01_transport = + ztimeout!(peer_shm01_manager.get_transport_unicast(&peer_net01)).unwrap(); assert!(!peer_net01_transport.is_shm().unwrap()); + let layout = shm01.alloc_layout().size(MSG_SIZE).res().unwrap(); + // Send the message println!("Transport SHM [3a]"); // The msg count for (msg_count, _) in (0..MSG_COUNT).enumerate() { // Create the message to send - let mut sbuf = ztimeout!(async { - loop { - match shm01.alloc(MSG_SIZE) { - Ok(sbuf) => break sbuf, - Err(_) => tokio::time::sleep(USLEEP).await, - } - } - }); - - let bs = unsafe { sbuf.as_mut_slice() }; - bs[0..8].copy_from_slice(&msg_count.to_le_bytes()); + let mut sbuf = ztimeout!(layout + .alloc() + .with_policy::>() + .res_async()) + .unwrap(); + sbuf[0..8].copy_from_slice(&msg_count.to_le_bytes()); let message: NetworkMessage = Push { wire_expr: "test".into(), @@ -296,16 +291,12 @@ mod tests { // The msg count for (msg_count, _) in (0..MSG_COUNT).enumerate() { // Create the message to send - let mut sbuf = ztimeout!(async { - loop { - match shm01.alloc(MSG_SIZE) { - Ok(sbuf) => break sbuf, - Err(_) => tokio::time::sleep(USLEEP).await, - } - } - }); - let bs = unsafe { sbuf.as_mut_slice() }; - bs[0..8].copy_from_slice(&msg_count.to_le_bytes()); + let mut sbuf = ztimeout!(layout + .alloc() + .with_policy::>() + .res_async()) + .unwrap(); + sbuf[0..8].copy_from_slice(&msg_count.to_le_bytes()); let message: NetworkMessage = Push { wire_expr: "test".into(), diff --git a/io/zenoh-transport/tests/unicast_simultaneous.rs b/io/zenoh-transport/tests/unicast_simultaneous.rs index d7856fde94..92267458f0 100644 --- a/io/zenoh-transport/tests/unicast_simultaneous.rs +++ b/io/zenoh-transport/tests/unicast_simultaneous.rs @@ -160,7 +160,7 @@ mod tests { println!("[Simultaneous 01a] => Adding endpoint {e:?}: {res:?}"); assert!(res.is_ok()); } - let locs = peer01_manager.get_listeners().await; + let locs = ztimeout!(peer01_manager.get_listeners()); println!("[Simultaneous 01b] => Getting endpoints: {endpoint01:?} {locs:?}"); assert_eq!(endpoint01.len(), locs.len()); @@ -170,7 +170,7 @@ mod tests { println!("[Simultaneous 02a] => Adding endpoint {e:?}: {res:?}"); assert!(res.is_ok()); } - let locs = peer02_manager.get_listeners().await; + let locs = ztimeout!(peer02_manager.get_listeners()); println!("[Simultaneous 02b] => Getting endpoints: {endpoint02:?} {locs:?}"); assert_eq!(endpoint02.len(), locs.len()); diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index a23fa48e96..a4a1e90edb 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -386,10 +386,7 @@ async fn open_transport_unicast( let _ = ztimeout!(client_manager.open_transport_unicast(e.clone())).unwrap(); } - let client_transport = client_manager - .get_transport_unicast(&router_id) - .await - .unwrap(); + let client_transport = ztimeout!(client_manager.get_transport_unicast(&router_id)).unwrap(); // Return the handlers ( @@ -529,9 +526,7 @@ async fn run_single( { let 
c_stats = client_transport.get_stats().unwrap().report(); println!("\tClient: {:?}", c_stats); - let r_stats = router_manager - .get_transport_unicast(&client_manager.config.zid) - .await + let r_stats = ztimeout!(router_manager.get_transport_unicast(&client_manager.config.zid)) .unwrap() .get_stats() .map(|s| s.report()) diff --git a/zenoh-ext/Cargo.toml b/zenoh-ext/Cargo.toml index c42b3d1b69..ec25e26ab9 100644 --- a/zenoh-ext/Cargo.toml +++ b/zenoh-ext/Cargo.toml @@ -29,6 +29,9 @@ maintenance = { status = "actively-developed" } [features] unstable = [] default = [] +shared-memory = [ + "zenoh/shared-memory", +] [dependencies] tokio = { workspace = true, features = [ diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index b954ed639c..440065331a 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -35,6 +35,7 @@ shared-memory = [ "zenoh-shm", "zenoh-protocol/shared-memory", "zenoh-transport/shared-memory", + "zenoh-buffers/shared-memory", ] stats = ["zenoh-transport/stats", "zenoh-protocol/stats"] transport_multilink = ["zenoh-transport/transport_multilink"] diff --git a/zenoh/src/bytes.rs b/zenoh/src/bytes.rs index 6f8ba23a65..036271b765 100644 --- a/zenoh/src/bytes.rs +++ b/zenoh/src/bytes.rs @@ -15,8 +15,14 @@ //! ZBytes primitives. use crate::buffers::ZBuf; use std::{ - borrow::Cow, convert::Infallible, fmt::Debug, marker::PhantomData, ops::Deref, str::Utf8Error, - string::FromUtf8Error, sync::Arc, + borrow::Cow, + convert::Infallible, + fmt::Debug, + marker::PhantomData, + ops::{Add, AddAssign, Deref}, + str::Utf8Error, + string::FromUtf8Error, + sync::Arc, }; use unwrap_infallible::UnwrapInfallible; use zenoh_buffers::{ @@ -28,8 +34,70 @@ use zenoh_buffers::{ use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_protocol::{core::Properties, zenoh::ext::AttachmentType}; use zenoh_result::{ZError, ZResult}; -#[cfg(feature = "shared-memory")] -use zenoh_shm::SharedMemoryBuf; +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +use zenoh_shm::{ + api::slice::{ + zsliceshm::{zsliceshm, ZSliceShm}, + zsliceshmmut::{zsliceshmmut, ZSliceShmMut}, + }, + SharedMemoryBuf, +}; + +pub enum Cipher { + Aes512(ZSlice), +} + +pub enum Compression { + LZ4, +} + +#[derive(Default)] +pub struct Transformation { + cipher: Option, + compression: Option, +} + +impl From for Transformation { + fn from(value: Cipher) -> Self { + Transformation { + cipher: Some(value), + ..Default::default() + } + } +} + +impl From for Transformation { + fn from(value: Compression) -> Self { + Transformation { + compression: Some(value), + ..Default::default() + } + } +} + +impl Add for Transformation { + type Output = Transformation; + + fn add(mut self, rhs: Self) -> Self::Output { + self += rhs; + self + } +} + +impl AddAssign for Transformation { + fn add_assign(&mut self, rhs: Transformation) { + fn combine(mut lhs: Option, mut rhs: Option) -> Option { + match (lhs.take(), rhs.take()) { + (Some(_), Some(r)) => Some(r), + (None, r) => r, + (l, None) => l, + } + } + + self.cipher = combine(self.cipher.take(), rhs.cipher); + self.compression = combine(self.compression.take(), rhs.compression); + } +} /// Trait to encode a type `T` into a [`Value`]. pub trait Serialize { @@ -40,10 +108,11 @@ pub trait Serialize { } pub trait Deserialize<'a, T> { + type Input: 'a; type Error; /// The implementer should take care of deserializing the type `T` based on the [`Encoding`] information. 
- fn deserialize(self, t: &'a ZBytes) -> Result; + fn deserialize(self, t: Self::Input) -> Result; } /// ZBytes contains the serialized bytes of user data. @@ -128,7 +197,18 @@ impl ZBytes { /// Deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. pub fn deserialize<'a, T>(&'a self) -> ZResult where - ZSerde: Deserialize<'a, T>, + ZSerde: Deserialize<'a, T, Input = &'a ZBytes>, + >::Error: Debug, + { + ZSerde + .deserialize(self) + .map_err(|e| zerror!("{:?}", e).into()) + } + + /// Deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. + pub fn deserialize_mut<'a, T>(&'a mut self) -> ZResult + where + ZSerde: Deserialize<'a, T, Input = &'a mut ZBytes>, >::Error: Debug, { ZSerde @@ -139,7 +219,16 @@ impl ZBytes { /// Infallibly deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. pub fn into<'a, T>(&'a self) -> T where - ZSerde: Deserialize<'a, T, Error = Infallible>, + ZSerde: Deserialize<'a, T, Input = &'a ZBytes, Error = Infallible>, + >::Error: Debug, + { + ZSerde.deserialize(self).unwrap_infallible() + } + + /// Infallibly deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. + pub fn into_mut<'a, T>(&'a mut self) -> T + where + ZSerde: Deserialize<'a, T, Input = &'a mut ZBytes, Error = Infallible>, >::Error: Debug, { ZSerde.deserialize(self).unwrap_infallible() @@ -192,7 +281,7 @@ where impl Iterator for ZBytesIterator<'_, T> where - for<'a> ZSerde: Deserialize<'a, T>, + for<'a> ZSerde: Deserialize<'a, T, Input = &'a ZBytes>, for<'a> >::Error: Debug, { type Item = T; @@ -311,10 +400,25 @@ impl From<&ZBuf> for ZBytes { } } -impl Deserialize<'_, ZBuf> for ZSerde { +impl Serialize<&mut ZBuf> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut ZBuf) -> Self::Output { + ZBytes::new(t.clone()) + } +} + +impl From<&mut ZBuf> for ZBytes { + fn from(t: &mut ZBuf) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Deserialize<'a, ZBuf> for ZSerde { + type Input = &'a ZBytes; type Error = Infallible; - fn deserialize(self, v: &ZBytes) -> Result { + fn deserialize(self, v: Self::Input) -> Result { Ok(v.0.clone()) } } @@ -331,6 +435,12 @@ impl From<&ZBytes> for ZBuf { } } +impl From<&mut ZBytes> for ZBuf { + fn from(value: &mut ZBytes) -> Self { + ZSerde.deserialize(&*value).unwrap_infallible() + } +} + // ZSlice impl Serialize for ZSerde { type Output = ZBytes; @@ -360,10 +470,25 @@ impl From<&ZSlice> for ZBytes { } } -impl Deserialize<'_, ZSlice> for ZSerde { +impl Serialize<&mut ZSlice> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut ZSlice) -> Self::Output { + ZBytes::new(t.clone()) + } +} + +impl From<&mut ZSlice> for ZBytes { + fn from(t: &mut ZSlice) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Deserialize<'a, ZSlice> for ZSerde { + type Input = &'a ZBytes; type Error = Infallible; - fn deserialize(self, v: &ZBytes) -> Result { + fn deserialize(self, v: Self::Input) -> Result { Ok(v.0.to_zslice()) } } @@ -380,6 +505,12 @@ impl From<&ZBytes> for ZSlice { } } +impl From<&mut ZBytes> for ZSlice { + fn from(value: &mut ZBytes) -> Self { + ZSerde.deserialize(&*value).unwrap_infallible() + } +} + // [u8; N] impl Serialize<[u8; N]> for ZSerde { type Output = ZBytes; @@ -409,10 +540,25 @@ impl From<&[u8; N]> for ZBytes { } } -impl Deserialize<'_, [u8; N]> for ZSerde { +impl Serialize<&mut [u8; N]> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut [u8; N]) -> Self::Output { + ZBytes::new(*t) + } +} + +impl From<&mut [u8; N]> for ZBytes { + fn from(t: &mut [u8; N]) 
-> Self { + ZSerde.serialize(*t) + } +} + +impl<'a, const N: usize> Deserialize<'a, [u8; N]> for ZSerde { + type Input = &'a ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: &ZBytes) -> Result<[u8; N], Self::Error> { + fn deserialize(self, v: Self::Input) -> Result<[u8; N], Self::Error> { use std::io::Read; if v.0.len() != N { @@ -441,6 +587,14 @@ impl TryFrom<&ZBytes> for [u8; N] { } } +impl TryFrom<&mut ZBytes> for [u8; N] { + type Error = ZDeserializeError; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // Vec impl Serialize> for ZSerde { type Output = ZBytes; @@ -470,10 +624,25 @@ impl From<&Vec> for ZBytes { } } -impl Deserialize<'_, Vec> for ZSerde { +impl Serialize<&mut Vec> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut Vec) -> Self::Output { + ZBytes::new(t.clone()) + } +} + +impl From<&mut Vec> for ZBytes { + fn from(t: &mut Vec) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Deserialize<'a, Vec> for ZSerde { + type Input = &'a ZBytes; type Error = Infallible; - fn deserialize(self, v: &ZBytes) -> Result, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result, Self::Error> { Ok(v.0.contiguous().to_vec()) } } @@ -490,6 +659,12 @@ impl From<&ZBytes> for Vec { } } +impl From<&mut ZBytes> for Vec { + fn from(value: &mut ZBytes) -> Self { + ZSerde.deserialize(&*value).unwrap_infallible() + } +} + // &[u8] impl Serialize<&[u8]> for ZSerde { type Output = ZBytes; @@ -505,6 +680,20 @@ impl From<&[u8]> for ZBytes { } } +impl Serialize<&mut [u8]> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut [u8]) -> Self::Output { + ZSerde.serialize(&*t) + } +} + +impl From<&mut [u8]> for ZBytes { + fn from(t: &mut [u8]) -> Self { + ZSerde.serialize(t) + } +} + // Cow<[u8]> impl<'a> Serialize> for ZSerde { type Output = ZBytes; @@ -534,10 +723,25 @@ impl From<&Cow<'_, [u8]>> for ZBytes { } } +impl<'a> Serialize<&mut Cow<'a, [u8]>> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut Cow<'a, [u8]>) -> Self::Output { + ZSerde.serialize(&*t) + } +} + +impl From<&mut Cow<'_, [u8]>> for ZBytes { + fn from(t: &mut Cow<'_, [u8]>) -> Self { + ZSerde.serialize(t) + } +} + impl<'a> Deserialize<'a, Cow<'a, [u8]>> for ZSerde { + type Input = &'a ZBytes; type Error = Infallible; - fn deserialize(self, v: &'a ZBytes) -> Result, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result, Self::Error> { Ok(v.0.contiguous()) } } @@ -557,6 +761,12 @@ impl<'a> From<&'a ZBytes> for Cow<'a, [u8]> { } } +impl<'a> From<&'a mut ZBytes> for Cow<'a, [u8]> { + fn from(value: &'a mut ZBytes) -> Self { + ZSerde.deserialize(&*value).unwrap_infallible() + } +} + // String impl Serialize for ZSerde { type Output = ZBytes; @@ -586,10 +796,25 @@ impl From<&String> for ZBytes { } } -impl Deserialize<'_, String> for ZSerde { +impl Serialize<&mut String> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &mut String) -> Self::Output { + ZSerde.serialize(&*s) + } +} + +impl From<&mut String> for ZBytes { + fn from(t: &mut String) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Deserialize<'a, String> for ZSerde { + type Input = &'a ZBytes; type Error = FromUtf8Error; - fn deserialize(self, v: &ZBytes) -> Result { + fn deserialize(self, v: Self::Input) -> Result { let v: Vec = ZSerde.deserialize(v).unwrap_infallible(); String::from_utf8(v) } @@ -611,12 +836,20 @@ impl TryFrom<&ZBytes> for String { } } +impl TryFrom<&mut ZBytes> for String { + type Error = FromUtf8Error; + + fn try_from(value: &mut 
ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // &str impl Serialize<&str> for ZSerde { type Output = ZBytes; fn serialize(self, s: &str) -> Self::Output { - Self.serialize(s.to_string()) + ZSerde.serialize(s.to_string()) } } @@ -626,6 +859,20 @@ impl From<&str> for ZBytes { } } +impl Serialize<&mut str> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &mut str) -> Self::Output { + ZSerde.serialize(&*s) + } +} + +impl From<&mut str> for ZBytes { + fn from(t: &mut str) -> Self { + ZSerde.serialize(t) + } +} + impl<'a> Serialize> for ZSerde { type Output = ZBytes; @@ -644,7 +891,7 @@ impl<'a> Serialize<&Cow<'a, str>> for ZSerde { type Output = ZBytes; fn serialize(self, s: &Cow<'a, str>) -> Self::Output { - Self.serialize(s.to_string()) + ZSerde.serialize(s.to_string()) } } @@ -654,10 +901,25 @@ impl From<&Cow<'_, str>> for ZBytes { } } +impl<'a> Serialize<&mut Cow<'a, str>> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &mut Cow<'a, str>) -> Self::Output { + ZSerde.serialize(&*s) + } +} + +impl From<&mut Cow<'_, str>> for ZBytes { + fn from(t: &mut Cow<'_, str>) -> Self { + ZSerde.serialize(t) + } +} + impl<'a> Deserialize<'a, Cow<'a, str>> for ZSerde { + type Input = &'a ZBytes; type Error = Utf8Error; - fn deserialize(self, v: &'a ZBytes) -> Result, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result, Self::Error> { Cow::try_from(v) } } @@ -686,6 +948,18 @@ impl<'a> TryFrom<&'a ZBytes> for Cow<'a, str> { } } +impl<'a> TryFrom<&'a mut ZBytes> for Cow<'a, str> { + type Error = Utf8Error; + + fn try_from(v: &'a mut ZBytes) -> Result { + let v: Cow<'a, [u8]> = Cow::from(v); + let _ = core::str::from_utf8(v.as_ref())?; + // SAFETY: &str is &[u8] with the guarantee that every char is UTF-8 + // As implemented internally https://doc.rust-lang.org/std/str/fn.from_utf8_unchecked.html. + Ok(unsafe { core::mem::transmute(v) }) + } +} + // - Integers impl macro_rules! impl_int { ($t:ty) => { @@ -725,10 +999,25 @@ macro_rules! impl_int { } } + impl Serialize<&mut $t> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut $t) -> Self::Output { + Self.serialize(*t) + } + } + + impl From<&mut $t> for ZBytes { + fn from(t: &mut $t) -> Self { + ZSerde.serialize(t) + } + } + impl<'a> Deserialize<'a, $t> for ZSerde { + type Input = &'a ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: &ZBytes) -> Result<$t, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result<$t, Self::Error> { use std::io::Read; let mut r = v.reader(); @@ -758,6 +1047,14 @@ macro_rules! 
impl_int { ZSerde.deserialize(value) } } + + impl TryFrom<&mut ZBytes> for $t { + type Error = ZDeserializeError; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } + } }; } @@ -810,10 +1107,25 @@ impl From<&bool> for ZBytes { } } -impl Deserialize<'_, bool> for ZSerde { +impl Serialize<&mut bool> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut bool) -> Self::Output { + ZSerde.serialize(*t) + } +} + +impl From<&mut bool> for ZBytes { + fn from(t: &mut bool) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Deserialize<'a, bool> for ZSerde { + type Input = &'a ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: &ZBytes) -> Result { + fn deserialize(self, v: Self::Input) -> Result { let p = v.deserialize::().map_err(|_| ZDeserializeError)?; match p { 0 => Ok(false), @@ -839,6 +1151,14 @@ impl TryFrom<&ZBytes> for bool { } } +impl TryFrom<&mut ZBytes> for bool { + type Error = ZDeserializeError; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // - Zenoh advanced types encoders/decoders // Properties impl Serialize> for ZSerde { @@ -869,10 +1189,25 @@ impl<'s> From<&'s Properties<'s>> for ZBytes { } } +impl Serialize<&mut Properties<'_>> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut Properties<'_>) -> Self::Output { + Self.serialize(t.as_str()) + } +} + +impl<'s> From<&'s mut Properties<'s>> for ZBytes { + fn from(t: &'s mut Properties<'s>) -> Self { + ZSerde.serialize(&*t) + } +} + impl<'s> Deserialize<'s, Properties<'s>> for ZSerde { + type Input = &'s ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: &'s ZBytes) -> Result, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result, Self::Error> { let s = v .deserialize::>() .map_err(|_| ZDeserializeError)?; @@ -897,6 +1232,14 @@ impl<'s> TryFrom<&'s ZBytes> for Properties<'s> { } } +impl<'s> TryFrom<&'s mut ZBytes> for Properties<'s> { + type Error = ZDeserializeError; + + fn try_from(value: &'s mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // JSON impl Serialize for ZSerde { type Output = Result; @@ -932,10 +1275,29 @@ impl TryFrom<&serde_json::Value> for ZBytes { } } -impl Deserialize<'_, serde_json::Value> for ZSerde { +impl Serialize<&mut serde_json::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &mut serde_json::Value) -> Self::Output { + let mut bytes = ZBytes::empty(); + serde_json::to_writer(bytes.writer(), t)?; + Ok(bytes) + } +} + +impl TryFrom<&mut serde_json::Value> for ZBytes { type Error = serde_json::Error; - fn deserialize(self, v: &ZBytes) -> Result { + fn try_from(value: &mut serde_json::Value) -> Result { + ZSerde.serialize(&*value) + } +} + +impl<'a> Deserialize<'a, serde_json::Value> for ZSerde { + type Input = &'a ZBytes; + type Error = serde_json::Error; + + fn deserialize(self, v: Self::Input) -> Result { serde_json::from_reader(v.reader()) } } @@ -956,6 +1318,14 @@ impl TryFrom<&ZBytes> for serde_json::Value { } } +impl TryFrom<&mut ZBytes> for serde_json::Value { + type Error = serde_json::Error; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // Yaml impl Serialize for ZSerde { type Output = Result; @@ -991,10 +1361,29 @@ impl TryFrom<&serde_yaml::Value> for ZBytes { } } -impl Deserialize<'_, serde_yaml::Value> for ZSerde { +impl Serialize<&mut serde_yaml::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &mut serde_yaml::Value) -> Self::Output { + let mut bytes = 
ZBytes::empty(); + serde_yaml::to_writer(bytes.writer(), t)?; + Ok(bytes) + } +} + +impl TryFrom<&mut serde_yaml::Value> for ZBytes { + type Error = serde_yaml::Error; + + fn try_from(value: &mut serde_yaml::Value) -> Result { + ZSerde.serialize(value) + } +} + +impl<'a> Deserialize<'a, serde_yaml::Value> for ZSerde { + type Input = &'a ZBytes; type Error = serde_yaml::Error; - fn deserialize(self, v: &ZBytes) -> Result { + fn deserialize(self, v: Self::Input) -> Result { serde_yaml::from_reader(v.reader()) } } @@ -1015,6 +1404,14 @@ impl TryFrom<&ZBytes> for serde_yaml::Value { } } +impl TryFrom<&mut ZBytes> for serde_yaml::Value { + type Error = serde_yaml::Error; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // CBOR impl Serialize for ZSerde { type Output = Result; @@ -1050,10 +1447,27 @@ impl TryFrom<&serde_cbor::Value> for ZBytes { } } -impl Deserialize<'_, serde_cbor::Value> for ZSerde { +impl Serialize<&mut serde_cbor::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &mut serde_cbor::Value) -> Self::Output { + ZSerde.serialize(&*t) + } +} + +impl TryFrom<&mut serde_cbor::Value> for ZBytes { + type Error = serde_cbor::Error; + + fn try_from(value: &mut serde_cbor::Value) -> Result { + ZSerde.serialize(value) + } +} + +impl<'a> Deserialize<'a, serde_cbor::Value> for ZSerde { + type Input = &'a ZBytes; type Error = serde_cbor::Error; - fn deserialize(self, v: &ZBytes) -> Result { + fn deserialize(self, v: Self::Input) -> Result { serde_cbor::from_reader(v.reader()) } } @@ -1074,6 +1488,14 @@ impl TryFrom<&ZBytes> for serde_cbor::Value { } } +impl TryFrom<&mut ZBytes> for serde_cbor::Value { + type Error = serde_cbor::Error; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // Pickle impl Serialize for ZSerde { type Output = Result; @@ -1113,10 +1535,27 @@ impl TryFrom<&serde_pickle::Value> for ZBytes { } } -impl Deserialize<'_, serde_pickle::Value> for ZSerde { +impl Serialize<&mut serde_pickle::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &mut serde_pickle::Value) -> Self::Output { + ZSerde.serialize(&*t) + } +} + +impl TryFrom<&mut serde_pickle::Value> for ZBytes { type Error = serde_pickle::Error; - fn deserialize(self, v: &ZBytes) -> Result { + fn try_from(value: &mut serde_pickle::Value) -> Result { + ZSerde.serialize(value) + } +} + +impl<'a> Deserialize<'a, serde_pickle::Value> for ZSerde { + type Input = &'a ZBytes; + type Error = serde_pickle::Error; + + fn deserialize(self, v: Self::Input) -> Result { serde_pickle::value_from_reader(v.reader(), serde_pickle::DeOptions::default()) } } @@ -1137,77 +1576,125 @@ impl TryFrom<&ZBytes> for serde_pickle::Value { } } +impl TryFrom<&mut ZBytes> for serde_pickle::Value { + type Error = serde_pickle::Error; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // Shared memory conversion -#[cfg(feature = "shared-memory")] -impl Serialize> for ZSerde { +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl Serialize for ZSerde { type Output = ZBytes; - fn serialize(self, t: Arc) -> Self::Output { - ZBytes::new(t) + fn serialize(self, t: ZSliceShm) -> Self::Output { + let slice: ZSlice = t.into(); + ZBytes::new(slice) } } -#[cfg(feature = "shared-memory")] -impl From> for ZBytes { - fn from(t: Arc) -> Self { + +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl From for ZBytes { + fn from(t: ZSliceShm) -> Self { ZSerde.serialize(t) } } -#[cfg(feature = 
"shared-memory")] -impl Serialize> for ZSerde { +// Shared memory conversion +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl Serialize for ZSerde { type Output = ZBytes; - fn serialize(self, t: Box) -> Self::Output { - let smb: Arc = t.into(); - Self.serialize(smb) + fn serialize(self, t: ZSliceShmMut) -> Self::Output { + let slice: ZSlice = t.into(); + ZBytes::new(slice) } } -#[cfg(feature = "shared-memory")] -impl From> for ZBytes { - fn from(t: Box) -> Self { +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl From for ZBytes { + fn from(t: ZSliceShmMut) -> Self { ZSerde.serialize(t) } } -#[cfg(feature = "shared-memory")] -impl Serialize for ZSerde { - type Output = ZBytes; +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl<'a> Deserialize<'a, &'a zsliceshm> for ZSerde { + type Input = &'a ZBytes; + type Error = ZDeserializeError; - fn serialize(self, t: SharedMemoryBuf) -> Self::Output { - ZBytes::new(t) + fn deserialize(self, v: Self::Input) -> Result<&'a zsliceshm, Self::Error> { + // A ZSliceShm is expected to have only one slice + let mut zslices = v.0.zslices(); + if let Some(zs) = zslices.next() { + if let Some(shmb) = zs.downcast_ref::() { + return Ok(shmb.into()); + } + } + Err(ZDeserializeError) } } -#[cfg(feature = "shared-memory")] -impl From for ZBytes { - fn from(t: SharedMemoryBuf) -> Self { - ZSerde.serialize(t) +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl<'a> TryFrom<&'a ZBytes> for &'a zsliceshm { + type Error = ZDeserializeError; + + fn try_from(value: &'a ZBytes) -> Result { + ZSerde.deserialize(value) } } -#[cfg(feature = "shared-memory")] -impl Deserialize<'_, SharedMemoryBuf> for ZSerde { +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zsliceshm { type Error = ZDeserializeError; - fn deserialize(self, v: &ZBytes) -> Result { - // A SharedMemoryBuf is expected to have only one slice - let mut zslices = v.0.zslices(); + fn try_from(value: &'a mut ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl<'a> Deserialize<'a, &'a mut zsliceshm> for ZSerde { + type Input = &'a mut ZBytes; + type Error = ZDeserializeError; + + fn deserialize(self, v: Self::Input) -> Result<&'a mut zsliceshm, Self::Error> { + // A ZSliceShmBorrowMut is expected to have only one slice + let mut zslices = v.0.zslices_mut(); if let Some(zs) = zslices.next() { - if let Some(shmb) = zs.downcast_ref::() { - return Ok(shmb.clone()); + if let Some(shmb) = zs.downcast_mut::() { + return Ok(shmb.into()); } } Err(ZDeserializeError) } } -#[cfg(feature = "shared-memory")] -impl TryFrom for SharedMemoryBuf { +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl<'a> Deserialize<'a, &'a mut zsliceshmmut> for ZSerde { + type Input = &'a mut ZBytes; type Error = ZDeserializeError; - fn try_from(value: ZBytes) -> Result { - ZSerde.deserialize(&value) + fn deserialize(self, v: Self::Input) -> Result<&'a mut zsliceshmmut, Self::Error> { + // A ZSliceShmBorrowMut is expected to have only one slice + let mut zslices = v.0.zslices_mut(); + if let Some(zs) = zslices.next() { + if let Some(shmb) = zs.downcast_mut::() { + return shmb.try_into().map_err(|_| ZDeserializeError); + } + } + Err(ZDeserializeError) + } +} + +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zsliceshmmut { + type Error = ZDeserializeError; + + fn try_from(value: &'a mut 
ZBytes) -> Result { + ZSerde.deserialize(value) } } @@ -1267,16 +1754,17 @@ where } } -impl Deserialize<'_, (A, B)> for ZSerde +impl<'s, A, B> Deserialize<'s, (A, B)> for ZSerde where for<'a> A: TryFrom<&'a ZBytes>, for<'a> >::Error: Debug, for<'b> B: TryFrom<&'b ZBytes>, for<'b> >::Error: Debug, { + type Input = &'s ZBytes; type Error = ZError; - fn deserialize(self, bytes: &ZBytes) -> Result<(A, B), Self::Error> { + fn deserialize(self, bytes: Self::Input) -> Result<(A, B), Self::Error> { let codec = Zenoh080::new(); let mut reader = bytes.0.reader(); @@ -1320,6 +1808,20 @@ where } } +impl TryFrom<&mut ZBytes> for (A, B) +where + for<'a> A: TryFrom<&'a ZBytes>, + for<'a> >::Error: Debug, + for<'b> B: TryFrom<&'b ZBytes>, + for<'b> >::Error: Debug, +{ + type Error = ZError; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // For convenience to always convert a Value in the examples #[derive(Debug, Clone, PartialEq, Eq)] pub enum StringOrBase64 { @@ -1361,6 +1863,13 @@ impl From<&ZBytes> for StringOrBase64 { } } +impl From<&mut ZBytes> for StringOrBase64 { + fn from(v: &mut ZBytes) -> Self { + StringOrBase64::from(&*v) + } +} + +// Protocol attachment extension impl From for AttachmentType { fn from(this: ZBytes) -> Self { AttachmentType { @@ -1384,6 +1893,16 @@ mod tests { use zenoh_buffers::{ZBuf, ZSlice}; use zenoh_protocol::core::Properties; + #[cfg(all(feature = "shared-memory", feature = "unstable"))] + use zenoh_shm::api::{ + protocol_implementations::posix::{ + posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, + protocol_id::POSIX_PROTOCOL_ID, + }, + provider::shared_memory_provider::SharedMemoryProviderBuilder, + slice::zsliceshm::{zsliceshm, ZSliceShm}, + }; + const NUM: usize = 1_000; macro_rules! 
serialize_deserialize { @@ -1399,81 +1918,118 @@ mod tests { }; } - let mut rng = rand::thread_rng(); - - // unsigned integer - serialize_deserialize!(u8, u8::MIN); - serialize_deserialize!(u16, u16::MIN); - serialize_deserialize!(u32, u32::MIN); - serialize_deserialize!(u64, u64::MIN); - serialize_deserialize!(usize, usize::MIN); - - serialize_deserialize!(u8, u8::MAX); - serialize_deserialize!(u16, u16::MAX); - serialize_deserialize!(u32, u32::MAX); - serialize_deserialize!(u64, u64::MAX); - serialize_deserialize!(usize, usize::MAX); - - for _ in 0..NUM { - serialize_deserialize!(u8, rng.gen::()); - serialize_deserialize!(u16, rng.gen::()); - serialize_deserialize!(u32, rng.gen::()); - serialize_deserialize!(u64, rng.gen::()); - serialize_deserialize!(usize, rng.gen::()); - } + // WARN: test function body produces stack overflow, so I split it into subroutines + #[inline(never)] + fn numeric() { + let mut rng = rand::thread_rng(); + + // unsigned integer + serialize_deserialize!(u8, u8::MIN); + serialize_deserialize!(u16, u16::MIN); + serialize_deserialize!(u32, u32::MIN); + serialize_deserialize!(u64, u64::MIN); + serialize_deserialize!(usize, usize::MIN); + + serialize_deserialize!(u8, u8::MAX); + serialize_deserialize!(u16, u16::MAX); + serialize_deserialize!(u32, u32::MAX); + serialize_deserialize!(u64, u64::MAX); + serialize_deserialize!(usize, usize::MAX); + + for _ in 0..NUM { + serialize_deserialize!(u8, rng.gen::()); + serialize_deserialize!(u16, rng.gen::()); + serialize_deserialize!(u32, rng.gen::()); + serialize_deserialize!(u64, rng.gen::()); + serialize_deserialize!(usize, rng.gen::()); + } - // signed integer - serialize_deserialize!(i8, i8::MIN); - serialize_deserialize!(i16, i16::MIN); - serialize_deserialize!(i32, i32::MIN); - serialize_deserialize!(i64, i64::MIN); - serialize_deserialize!(isize, isize::MIN); - - serialize_deserialize!(i8, i8::MAX); - serialize_deserialize!(i16, i16::MAX); - serialize_deserialize!(i32, i32::MAX); - serialize_deserialize!(i64, i64::MAX); - serialize_deserialize!(isize, isize::MAX); - - for _ in 0..NUM { - serialize_deserialize!(i8, rng.gen::()); - serialize_deserialize!(i16, rng.gen::()); - serialize_deserialize!(i32, rng.gen::()); - serialize_deserialize!(i64, rng.gen::()); - serialize_deserialize!(isize, rng.gen::()); - } + // signed integer + serialize_deserialize!(i8, i8::MIN); + serialize_deserialize!(i16, i16::MIN); + serialize_deserialize!(i32, i32::MIN); + serialize_deserialize!(i64, i64::MIN); + serialize_deserialize!(isize, isize::MIN); + + serialize_deserialize!(i8, i8::MAX); + serialize_deserialize!(i16, i16::MAX); + serialize_deserialize!(i32, i32::MAX); + serialize_deserialize!(i64, i64::MAX); + serialize_deserialize!(isize, isize::MAX); + + for _ in 0..NUM { + serialize_deserialize!(i8, rng.gen::()); + serialize_deserialize!(i16, rng.gen::()); + serialize_deserialize!(i32, rng.gen::()); + serialize_deserialize!(i64, rng.gen::()); + serialize_deserialize!(isize, rng.gen::()); + } - // float - serialize_deserialize!(f32, f32::MIN); - serialize_deserialize!(f64, f64::MIN); + // float + serialize_deserialize!(f32, f32::MIN); + serialize_deserialize!(f64, f64::MIN); - serialize_deserialize!(f32, f32::MAX); - serialize_deserialize!(f64, f64::MAX); + serialize_deserialize!(f32, f32::MAX); + serialize_deserialize!(f64, f64::MAX); - for _ in 0..NUM { - serialize_deserialize!(f32, rng.gen::()); - serialize_deserialize!(f64, rng.gen::()); + for _ in 0..NUM { + serialize_deserialize!(f32, rng.gen::()); + serialize_deserialize!(f64, 
rng.gen::()); + } + } + numeric(); + + // WARN: test function body produces stack overflow, so I split it into subroutines + #[inline(never)] + fn basic() { + // String + serialize_deserialize!(String, ""); + serialize_deserialize!(String, String::from("abcdef")); + + // Cow + serialize_deserialize!(Cow, Cow::from("")); + serialize_deserialize!(Cow, Cow::from(String::from("abcdef"))); + + // Vec + serialize_deserialize!(Vec, vec![0u8; 0]); + serialize_deserialize!(Vec, vec![0u8; 64]); + + // Cow<[u8]> + serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 0])); + serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 64])); + + // ZBuf + serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 0])); + serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 64])); + } + basic(); + + // SHM + #[cfg(all(feature = "shared-memory", feature = "unstable"))] + { + // create an SHM backend... + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(4096) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + // Prepare a layout for allocations + let layout = provider.alloc_layout().size(1024).res().unwrap(); + + // allocate an SHM buffer + let mutable_shm_buf = layout.alloc().res().unwrap(); + + // convert to immutable SHM buffer + let immutable_shm_buf: ZSliceShm = mutable_shm_buf.into(); + + serialize_deserialize!(&zsliceshm, immutable_shm_buf); } - - // String - serialize_deserialize!(String, ""); - serialize_deserialize!(String, String::from("abcdef")); - - // Cow - serialize_deserialize!(Cow, Cow::from("")); - serialize_deserialize!(Cow, Cow::from(String::from("abcdef"))); - - // Vec - serialize_deserialize!(Vec, vec![0u8; 0]); - serialize_deserialize!(Vec, vec![0u8; 64]); - - // Cow<[u8]> - serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 0])); - serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 64])); - - // ZBuf - serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 0])); - serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 64])); // Properties serialize_deserialize!(Properties, Properties::from("")); diff --git a/zenoh/src/encoding.rs b/zenoh/src/encoding.rs index 81dfb04752..2b21765d38 100644 --- a/zenoh/src/encoding.rs +++ b/zenoh/src/encoding.rs @@ -17,7 +17,7 @@ use std::{borrow::Cow, convert::Infallible, fmt, str::FromStr}; use zenoh_buffers::{ZBuf, ZSlice}; use zenoh_protocol::core::EncodingId; #[cfg(feature = "shared-memory")] -use ::{std::sync::Arc, zenoh_shm::SharedMemoryBuf}; +use zenoh_shm::api::slice::{zsliceshm::ZSliceShm, zsliceshmmut::ZSliceShmMut}; /// Default encoding values used by Zenoh. 
/// @@ -835,16 +835,10 @@ impl EncodingMapping for serde_pickle::Value { // - Zenoh SHM #[cfg(feature = "shared-memory")] -impl EncodingMapping for Arc { +impl EncodingMapping for ZSliceShm { const ENCODING: Encoding = Encoding::ZENOH_BYTES; } - -#[cfg(feature = "shared-memory")] -impl EncodingMapping for Box { - const ENCODING: Encoding = Encoding::ZENOH_BYTES; -} - #[cfg(feature = "shared-memory")] -impl EncodingMapping for SharedMemoryBuf { +impl EncodingMapping for ZSliceShmMut { const ENCODING: Encoding = Encoding::ZENOH_BYTES; } diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 7e25375d64..ac7d8b3059 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -88,10 +88,16 @@ use net::runtime::Runtime; use prelude::*; use scouting::ScoutBuilder; use std::future::Ready; +#[cfg(all(feature = "unstable", feature = "shared-memory"))] +use std::sync::Arc; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; pub use zenoh_macros::{ke, kedefine, keformat, kewrite}; use zenoh_protocol::core::WhatAmIMatcher; use zenoh_result::{zerror, ZResult}; +#[cfg(all(feature = "unstable", feature = "shared-memory"))] +pub use zenoh_shm::api as shm; +#[cfg(all(feature = "unstable", feature = "shared-memory"))] +pub use zenoh_shm::api::client_storage::SharedMemoryClientStorage; use zenoh_util::concat_enabled_features; /// A zenoh error. @@ -148,8 +154,6 @@ pub mod queryable; pub mod sample; pub mod subscriber; pub mod value; -#[cfg(feature = "shared-memory")] -pub use zenoh_shm as shm; /// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate /// reading and writing data. @@ -252,7 +256,11 @@ where TryIntoConfig: std::convert::TryInto + Send + 'static, >::Error: std::fmt::Debug, { - OpenBuilder { config } + OpenBuilder { + config, + #[cfg(all(feature = "unstable", feature = "shared-memory"))] + shm_clients: None, + } } /// A builder returned by [`open`] used to open a zenoh [`Session`]. 
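// Illustrative sketch, not part of this patch series: intended usage of the
// `with_shm_clients()` option that the surrounding hunks add to `OpenBuilder`.
// It assumes the `unstable` and `shared-memory` features are enabled; how the
// `Arc<SharedMemoryClientStorage>` itself is created is not shown in these
// hunks, so it is taken here as a parameter.
use std::sync::Arc;
use zenoh::prelude::r#async::*;
use zenoh::SharedMemoryClientStorage;

async fn open_with_custom_shm_clients(shm_clients: Arc<SharedMemoryClientStorage>) -> Session {
    zenoh::open(config::peer())
        // Register the custom SHM client storage before resolving the builder.
        .with_shm_clients(shm_clients)
        .res_async()
        .await
        .unwrap()
}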
@@ -273,6 +281,20 @@ where >::Error: std::fmt::Debug, { config: TryIntoConfig, + #[cfg(all(feature = "unstable", feature = "shared-memory"))] + shm_clients: Option>, +} + +#[cfg(all(feature = "unstable", feature = "shared-memory"))] +impl OpenBuilder +where + TryIntoConfig: std::convert::TryInto + Send + 'static, + >::Error: std::fmt::Debug, +{ + pub fn with_shm_clients(mut self, shm_clients: Arc) -> Self { + self.shm_clients = Some(shm_clients); + self + } } impl Resolvable for OpenBuilder @@ -293,7 +315,12 @@ where .config .try_into() .map_err(|e| zerror!("Invalid Zenoh configuration {:?}", &e))?; - Session::new(config).res_sync() + Session::new( + config, + #[cfg(all(feature = "unstable", feature = "shared-memory"))] + self.shm_clients, + ) + .res_sync() } } diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 68e121847d..ee58bc5b5d 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -40,6 +40,10 @@ use zenoh_plugin_trait::{PluginStartArgs, StructVersion}; use zenoh_protocol::core::{Locator, WhatAmI, ZenohId}; use zenoh_protocol::network::NetworkMessage; use zenoh_result::{bail, ZResult}; +#[cfg(all(feature = "unstable", feature = "shared-memory"))] +use zenoh_shm::api::client_storage::SharedMemoryClientStorage; +#[cfg(all(feature = "unstable", feature = "shared-memory"))] +use zenoh_shm::reader::SharedMemoryReader; use zenoh_sync::get_mut_unchecked; use zenoh_task::TaskController; use zenoh_transport::{ @@ -47,6 +51,33 @@ use zenoh_transport::{ TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; +#[derive(Default)] +pub struct RuntimeBuilder { + #[cfg(all(feature = "unstable", feature = "shared-memory"))] + shm_clients: Option>, +} + +impl RuntimeBuilder { + #[cfg(all(feature = "unstable", feature = "shared-memory"))] + pub fn shm_clients(mut self, shm_clients: Arc) -> Self { + self.shm_clients = Some(shm_clients); + self + } + + pub async fn build(self, config: Config) -> ZResult { + let mut runtime = Runtime::init( + config, + #[cfg(all(feature = "unstable", feature = "shared-memory"))] + self.shm_clients, + ) + .await?; + match runtime.start().await { + Ok(()) => Ok(runtime), + Err(err) => Err(err), + } + } +} + struct RuntimeState { zid: ZenohId, whatami: WhatAmI, @@ -89,14 +120,19 @@ impl PluginStartArgs for Runtime {} impl Runtime { pub async fn new(config: Config) -> ZResult { - let mut runtime = Runtime::init(config).await?; - match runtime.start().await { - Ok(()) => Ok(runtime), - Err(err) => Err(err), - } + Self::builder().build(config).await + } + + pub fn builder() -> RuntimeBuilder { + RuntimeBuilder::default() } - pub(crate) async fn init(config: Config) -> ZResult { + pub(crate) async fn init( + config: Config, + #[cfg(all(feature = "unstable", feature = "shared-memory"))] shm_clients: Option< + Arc, + >, + ) -> ZResult { tracing::debug!("Zenoh Rust API {}", GIT_VERSION); let zid = *config.id(); @@ -118,8 +154,18 @@ impl Runtime { .from_config(&config) .await? 
.whatami(whatami) - .zid(zid) - .build(handler.clone())?; + .zid(zid); + + #[cfg(feature = "unstable")] + let transport_manager = zcondfeat!( + "shared-memory", + transport_manager.shm_reader(shm_clients.map(SharedMemoryReader::new)), + transport_manager + ) + .build(handler.clone())?; + + #[cfg(not(feature = "unstable"))] + let transport_manager = transport_manager.build(handler.clone())?; let config = Notifier::new(config); diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index c59cca8b9e..a2ea5a768b 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -135,9 +135,9 @@ impl
ValueBuilderTrait for PublicationBuilder { } } - fn payload(self, payload: IntoPayload) -> Self + fn payload(self, payload: IntoZBytes) -> Self where - IntoPayload: Into, + IntoZBytes: Into, { Self { kind: PublicationBuilderPut { @@ -418,9 +418,9 @@ impl<'a> Publisher<'a> { /// # } /// ``` #[inline] - pub fn put(&self, payload: IntoPayload) -> PublisherPutBuilder<'_> + pub fn put(&self, payload: IntoZBytes) -> PublisherPutBuilder<'_> where - IntoPayload: Into, + IntoZBytes: Into, { PublicationBuilder { publisher: self, diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 563df461b8..d5d4de5d0b 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -142,15 +142,15 @@ impl Query { /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]), /// replying on a disjoint key expression will result in an error when resolving the reply. #[inline(always)] - pub fn reply<'b, TryIntoKeyExpr, IntoPayload>( + pub fn reply<'b, TryIntoKeyExpr, IntoZBytes>( &self, key_expr: TryIntoKeyExpr, - payload: IntoPayload, + payload: IntoZBytes, ) -> ReplyPutBuilder<'_, 'b> where TryIntoKeyExpr: TryInto>, >>::Error: Into, - IntoPayload: Into, + IntoZBytes: Into, { ReplyBuilder { query: self, @@ -531,6 +531,8 @@ impl SyncResolve for ReplyErrBuilder<'_> { payload: ResponseBody::Err(zenoh::Err { encoding: self.value.encoding.into(), ext_sinfo: None, + #[cfg(feature = "shared-memory")] + ext_shm: None, ext_unknown: vec![], payload: self.value.payload.into(), }), diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs index cab5c2333a..4b2f0d751d 100644 --- a/zenoh/src/sample/builder.rs +++ b/zenoh/src/sample/builder.rs @@ -76,13 +76,13 @@ pub struct SampleBuilder { } impl SampleBuilder { - pub fn put( + pub fn put( key_expr: IntoKeyExpr, - payload: IntoPayload, + payload: IntoZBytes, ) -> SampleBuilder where IntoKeyExpr: Into>, - IntoPayload: Into, + IntoZBytes: Into, { Self { sample: Sample { diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/sample/mod.rs index 6078a5a350..b5dbd727ec 100644 --- a/zenoh/src/sample/mod.rs +++ b/zenoh/src/sample/mod.rs @@ -59,15 +59,15 @@ pub(crate) struct DataInfo { } pub(crate) trait DataInfoIntoSample { - fn into_sample( + fn into_sample( self, key_expr: IntoKeyExpr, - payload: IntoPayload, + payload: IntoZBytes, #[cfg(feature = "unstable")] attachment: Option, ) -> Sample where IntoKeyExpr: Into>, - IntoPayload: Into; + IntoZBytes: Into; } impl DataInfoIntoSample for DataInfo { @@ -76,15 +76,15 @@ impl DataInfoIntoSample for DataInfo { // The test for it is intentionally not added to avoid inserting extra "if" into hot path. // The correctness of the data should be ensured by the caller. 
#[inline] - fn into_sample( + fn into_sample( self, key_expr: IntoKeyExpr, - payload: IntoPayload, + payload: IntoZBytes, #[cfg(feature = "unstable")] attachment: Option, ) -> Sample where IntoKeyExpr: Into>, - IntoPayload: Into, + IntoZBytes: Into, { Sample { key_expr: key_expr.into(), @@ -106,15 +106,15 @@ impl DataInfoIntoSample for DataInfo { impl DataInfoIntoSample for Option { #[inline] - fn into_sample( + fn into_sample( self, key_expr: IntoKeyExpr, - payload: IntoPayload, + payload: IntoZBytes, #[cfg(feature = "unstable")] attachment: Option, ) -> Sample where IntoKeyExpr: Into>, - IntoPayload: Into, + IntoZBytes: Into, { if let Some(data_info) = self { data_info.into_sample( diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 4a6a312dcf..465f03bf14 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -80,6 +80,8 @@ use zenoh_protocol::{ }, }; use zenoh_result::ZResult; +#[cfg(all(feature = "unstable", feature = "shared-memory"))] +use zenoh_shm::api::client_storage::SharedMemoryClientStorage; use zenoh_task::TaskController; use zenoh_util::core::AsyncResolve; @@ -705,15 +707,15 @@ impl Session { /// # } /// ``` #[inline] - pub fn put<'a, 'b: 'a, TryIntoKeyExpr, IntoPayload>( + pub fn put<'a, 'b: 'a, TryIntoKeyExpr, IntoZBytes>( &'a self, key_expr: TryIntoKeyExpr, - payload: IntoPayload, + payload: IntoZBytes, ) -> SessionPutBuilder<'a, 'b> where TryIntoKeyExpr: TryInto>, >>::Error: Into, - IntoPayload: Into, + IntoZBytes: Into, { PublicationBuilder { publisher: self.declare_publisher(key_expr), @@ -832,12 +834,23 @@ impl Session { } #[allow(clippy::new_ret_no_self)] - pub(super) fn new(config: Config) -> impl Resolve> { + pub(super) fn new( + config: Config, + #[cfg(all(feature = "unstable", feature = "shared-memory"))] shm_clients: Option< + Arc, + >, + ) -> impl Resolve> { ResolveFuture::new(async move { tracing::debug!("Config: {:?}", &config); let aggregated_subscribers = config.aggregation().subscribers().clone(); let aggregated_publishers = config.aggregation().publishers().clone(); - match Runtime::init(config).await { + match Runtime::init( + config, + #[cfg(all(feature = "unstable", feature = "shared-memory"))] + shm_clients, + ) + .await + { Ok(mut runtime) => { let mut session = Self::init( runtime.clone(), diff --git a/zenoh/src/value.rs b/zenoh/src/value.rs index 26165334eb..3360d95c96 100644 --- a/zenoh/src/value.rs +++ b/zenoh/src/value.rs @@ -15,7 +15,7 @@ //! Value primitives. use crate::{bytes::ZBytes, encoding::Encoding}; -/// A zenoh [`Value`] contains a `payload` and an [`Encoding`] that indicates how the [`Payload`] should be interpreted. +/// A zenoh [`Value`] contains a `payload` and an [`Encoding`] that indicates how the payload's [`ZBytes`] should be interpreted. #[non_exhaustive] #[derive(Clone, Debug, PartialEq, Eq)] pub struct Value { @@ -24,7 +24,7 @@ pub struct Value { } impl Value { - /// Creates a new [`Value`] with specified [`Payload`] and [`Encoding`]. + /// Creates a new [`Value`] with specified [`ZBytes`] and [`Encoding`]. pub fn new(payload: T, encoding: E) -> Self where T: Into, @@ -48,7 +48,7 @@ impl Value { self.payload.is_empty() && self.encoding == Encoding::default() } - /// Gets binary [`Payload`] of this [`Value`]. + /// Gets binary [`ZBytes`] of this [`Value`]. 
pub fn payload(&self) -> &ZBytes { &self.payload } diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index 9c807bd121..9fd00788f4 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -42,16 +42,14 @@ async fn close_session(session: Session) { async fn zenoh_events() { let session = open_session(&["tcp/127.0.0.1:18447"], &[]).await; let zid = session.zid(); - let sub1 = session + let sub1 = ztimeout!(session .declare_subscriber(format!("@/session/{zid}/transport/unicast/*")) - .res() - .await - .unwrap(); - let sub2 = session + .res()) + .unwrap(); + let sub2 = ztimeout!(session .declare_subscriber(format!("@/session/{zid}/transport/unicast/*/link/*")) - .res() - .await - .unwrap(); + .res()) + .unwrap(); let session2 = open_session(&["tcp/127.0.0.1:18448"], &["tcp/127.0.0.1:18447"]).await; let zid2 = session2.zid(); @@ -104,7 +102,7 @@ async fn zenoh_events() { assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/"))); assert!(sample.as_ref().unwrap().kind() == SampleKind::Delete); - sub2.undeclare().res().await.unwrap(); - sub1.undeclare().res().await.unwrap(); + ztimeout!(sub2.undeclare().res()).unwrap(); + ztimeout!(sub1.undeclare().res()).unwrap(); close_session(session).await; } diff --git a/zenoh/tests/payload.rs b/zenoh/tests/payload.rs new file mode 100644 index 0000000000..d9910bedf5 --- /dev/null +++ b/zenoh/tests/payload.rs @@ -0,0 +1,97 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +#[test] +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +fn shm_payload_single_buf() { + use zenoh::shm::slice::zsliceshm::{zsliceshm, ZSliceShm}; + use zenoh::shm::slice::zsliceshmmut::{zsliceshmmut, ZSliceShmMut}; + use zenoh::{ + bytes::ZBytes, + shm::{ + protocol_implementations::posix::{ + posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, + protocol_id::POSIX_PROTOCOL_ID, + }, + provider::shared_memory_provider::SharedMemoryProviderBuilder, + }, + }; + + // create an SHM backend... 
+ let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(4096) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + // Prepare a layout for allocations + let layout = provider.alloc_layout().size(1024).res().unwrap(); + + // allocate an SHM buffer + let mut owned_shm_buf_mut = layout.alloc().res().unwrap(); + + // get data + let _data: &[u8] = &owned_shm_buf_mut; + let _data_mut: &mut [u8] = &mut owned_shm_buf_mut; + + // convert into immutable owned buffer + let owned_shm_buf: ZSliceShm = owned_shm_buf_mut.into(); + + // get data + let _data: &[u8] = &owned_shm_buf; + + // convert again into mutable owned buffer + let mut owned_shm_buf_mut: ZSliceShmMut = owned_shm_buf.try_into().unwrap(); + + // get data + let _data: &[u8] = &owned_shm_buf_mut; + let _data_mut: &mut [u8] = &mut owned_shm_buf_mut; + + // build a ZBytes from an SHM buffer + let mut payload: ZBytes = owned_shm_buf_mut.into(); + + { + // deserialize ZBytes as borrowed zsliceshm + let borrowed_shm_buf: &zsliceshm = payload.deserialize().unwrap(); + + // get data + let _data: &[u8] = borrowed_shm_buf; + + // construct owned buffer from borrowed type + let owned = borrowed_shm_buf.to_owned(); + + // get data + let _data: &[u8] = &owned; + } + + { + // deserialize ZBytes as mutably borrowed zsliceshm + let borrowed_shm_buf: &mut zsliceshm = payload.deserialize_mut().unwrap(); + + // get data + let _data: &[u8] = borrowed_shm_buf; + + // convert zsliceshm to zsliceshmmut + let borrowed_shm_buf_mut: &mut zsliceshmmut = borrowed_shm_buf.try_into().unwrap(); + + // get data + let _data: &[u8] = borrowed_shm_buf_mut; + let _data_mut: &mut [u8] = borrowed_shm_buf_mut; + } +} diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index d80e9bd8d9..b6a0e9d226 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -127,14 +127,14 @@ impl Task { // The Queryable task keeps replying to requested messages until all checkpoints are finished. Self::Queryable(ke, payload_size) => { - let queryable = session.declare_queryable(ke).res_async().await?; + let queryable = ztimeout!(session.declare_queryable(ke).res_async())?; let payload = vec![0u8; *payload_size]; loop { tokio::select! { _ = token.cancelled() => break, query = queryable.recv_async() => { - query?.reply(ke.to_owned(), payload.clone()).res_async().await?; + ztimeout!(query?.reply(ke.to_owned(), payload.clone()).res_async())?; }, } } @@ -279,7 +279,7 @@ impl Recipe { // In case of client can't connect to some peers/routers loop { - if let Ok(session) = zenoh::open(config.clone()).res_async().await { + if let Ok(session) = ztimeout!(zenoh::open(config.clone()).res_async()) { break session.into_arc(); } else { tokio::time::sleep(Duration::from_secs(1)).await; @@ -315,11 +315,7 @@ impl Recipe { // node_task_tracker.wait().await; // Close the session once all the task assoicated with the node are done. 
- Arc::try_unwrap(session) - .unwrap() - .close() - .res_async() - .await?; + ztimeout!(Arc::try_unwrap(session).unwrap().close().res_async())?; println!("Node: {} is closed.", &node.name); Result::Ok(()) diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs new file mode 100644 index 0000000000..2a9685eb36 --- /dev/null +++ b/zenoh/tests/shm.rs @@ -0,0 +1,204 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +#[cfg(all(feature = "unstable", feature = "shared-memory"))] +mod tests { + use std::sync::atomic::{AtomicUsize, Ordering}; + use std::sync::Arc; + use std::time::Duration; + use zenoh::prelude::r#async::*; + use zenoh::shm::protocol_implementations::posix::posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend; + use zenoh::shm::protocol_implementations::posix::protocol_id::POSIX_PROTOCOL_ID; + use zenoh::shm::provider::shared_memory_provider::{ + BlockOn, GarbageCollect, SharedMemoryProviderBuilder, + }; + use zenoh_core::ztimeout; + + const TIMEOUT: Duration = Duration::from_secs(60); + const SLEEP: Duration = Duration::from_secs(1); + + const MSG_COUNT: usize = 1_00; + const MSG_SIZE: [usize; 2] = [1_024, 100_000]; + + async fn open_session_unicast(endpoints: &[&str]) -> (Session, Session) { + // Open the sessions + let mut config = config::peer(); + config.listen.endpoints = endpoints + .iter() + .map(|e| e.parse().unwrap()) + .collect::>(); + config.scouting.multicast.set_enabled(Some(false)).unwrap(); + config.transport.shared_memory.set_enabled(true).unwrap(); + println!("[ ][01a] Opening peer01 session: {:?}", endpoints); + let peer01 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + + let mut config = config::peer(); + config.connect.endpoints = endpoints + .iter() + .map(|e| e.parse().unwrap()) + .collect::>(); + config.scouting.multicast.set_enabled(Some(false)).unwrap(); + config.transport.shared_memory.set_enabled(true).unwrap(); + println!("[ ][02a] Opening peer02 session: {:?}", endpoints); + let peer02 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + + (peer01, peer02) + } + + async fn open_session_multicast(endpoint01: &str, endpoint02: &str) -> (Session, Session) { + // Open the sessions + let mut config = config::peer(); + config.listen.endpoints = vec![endpoint01.parse().unwrap()]; + config.scouting.multicast.set_enabled(Some(true)).unwrap(); + config.transport.shared_memory.set_enabled(true).unwrap(); + println!("[ ][01a] Opening peer01 session: {}", endpoint01); + let peer01 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + + let mut config = config::peer(); + config.listen.endpoints = vec![endpoint02.parse().unwrap()]; + config.scouting.multicast.set_enabled(Some(true)).unwrap(); + config.transport.shared_memory.set_enabled(true).unwrap(); + println!("[ ][02a] Opening peer02 session: {}", endpoint02); + let peer02 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + + (peer01, peer02) + } + + async fn close_session(peer01: Session, peer02: Session) { + println!("[ ][01d] Closing peer02 session"); + ztimeout!(peer01.close().res_async()).unwrap(); + println!("[ ][02d] Closing peer02 session"); + 
ztimeout!(peer02.close().res_async()).unwrap(); + } + + async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Reliability) { + let msg_count = match reliability { + Reliability::Reliable => MSG_COUNT, + Reliability::BestEffort => 1, + }; + let msgs = Arc::new(AtomicUsize::new(0)); + + for size in MSG_SIZE { + let key_expr = format!("shm{size}"); + + msgs.store(0, Ordering::SeqCst); + + // Subscribe to data + println!("[PS][01b] Subscribing on peer01 session"); + let c_msgs = msgs.clone(); + let _sub = ztimeout!(peer01 + .declare_subscriber(&key_expr) + .callback(move |sample| { + assert_eq!(sample.payload().len(), size); + c_msgs.fetch_add(1, Ordering::Relaxed); + }) + .res_async()) + .unwrap(); + + // Wait for the declaration to propagate + tokio::time::sleep(SLEEP).await; + + // create SHM backend... + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(size * MSG_COUNT / 10) + .unwrap() + .res() + .unwrap(); + // ...and SHM provider + let shm01 = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + // remember segment size that was allocated + let shm_segment_size = shm01.available(); + + // Prepare a layout for allocations + let layout = shm01.alloc_layout().size(size).res().unwrap(); + + // Put data + println!("[PS][03b] Putting on peer02 session. {MSG_COUNT} msgs of {size} bytes."); + for c in 0..msg_count { + // Allocate new message + let sbuf = ztimeout!(layout + .alloc() + .with_policy::>() + .res_async()) + .unwrap(); + println!("{c} created"); + + // Publish this message + ztimeout!(peer02 + .put(&key_expr, sbuf) + .congestion_control(CongestionControl::Block) + .res_async()) + .unwrap(); + println!("{c} putted"); + } + + // wat for all messages received + ztimeout!(async { + loop { + let cnt = msgs.load(Ordering::Relaxed); + println!("[PS][03b] Received {cnt}/{msg_count}."); + if cnt != msg_count { + tokio::time::sleep(SLEEP).await; + } else { + break; + } + } + }); + + // wat for all memory reclaimed + ztimeout!(async { + loop { + shm01.garbage_collect(); + let available = shm01.available(); + println!("[PS][03b] SHM available {available}/{shm_segment_size}"); + if available != shm_segment_size { + tokio::time::sleep(SLEEP).await; + } else { + break; + } + } + }); + } + } + + #[cfg(feature = "shared-memory")] + #[test] + fn zenoh_shm_unicast() { + tokio::runtime::Runtime::new().unwrap().block_on(async { + // Initiate logging + zenoh_util::try_init_log_from_env(); + + let (peer01, peer02) = open_session_unicast(&["tcp/127.0.0.1:17447"]).await; + test_session_pubsub(&peer01, &peer02, Reliability::Reliable).await; + close_session(peer01, peer02).await; + }); + } + + #[cfg(feature = "shared-memory")] + #[test] + fn zenoh_shm_multicast() { + tokio::runtime::Runtime::new().unwrap().block_on(async { + // Initiate logging + zenoh_util::try_init_log_from_env(); + + let (peer01, peer02) = + open_session_multicast("udp/224.0.0.1:17448", "udp/224.0.0.1:17448").await; + test_session_pubsub(&peer01, &peer02, Reliability::BestEffort).await; + close_session(peer01, peer02).await; + }); + } +} From e2279d85192e8229d488380c3fb960e9a77d98a9 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 22 Apr 2024 16:59:09 +0200 Subject: [PATCH 277/357] Remove experimental code from bytes --- zenoh/src/bytes.rs | 66 ++-------------------------------------------- 1 file changed, 2 insertions(+), 64 deletions(-) diff --git a/zenoh/src/bytes.rs b/zenoh/src/bytes.rs index 036271b765..fb4e3a19e9 100644 --- 
a/zenoh/src/bytes.rs +++ b/zenoh/src/bytes.rs @@ -15,14 +15,8 @@ //! ZBytes primitives. use crate::buffers::ZBuf; use std::{ - borrow::Cow, - convert::Infallible, - fmt::Debug, - marker::PhantomData, - ops::{Add, AddAssign, Deref}, - str::Utf8Error, - string::FromUtf8Error, - sync::Arc, + borrow::Cow, convert::Infallible, fmt::Debug, marker::PhantomData, ops::Deref, str::Utf8Error, + string::FromUtf8Error, sync::Arc, }; use unwrap_infallible::UnwrapInfallible; use zenoh_buffers::{ @@ -43,62 +37,6 @@ use zenoh_shm::{ SharedMemoryBuf, }; -pub enum Cipher { - Aes512(ZSlice), -} - -pub enum Compression { - LZ4, -} - -#[derive(Default)] -pub struct Transformation { - cipher: Option, - compression: Option, -} - -impl From for Transformation { - fn from(value: Cipher) -> Self { - Transformation { - cipher: Some(value), - ..Default::default() - } - } -} - -impl From for Transformation { - fn from(value: Compression) -> Self { - Transformation { - compression: Some(value), - ..Default::default() - } - } -} - -impl Add for Transformation { - type Output = Transformation; - - fn add(mut self, rhs: Self) -> Self::Output { - self += rhs; - self - } -} - -impl AddAssign for Transformation { - fn add_assign(&mut self, rhs: Transformation) { - fn combine(mut lhs: Option, mut rhs: Option) -> Option { - match (lhs.take(), rhs.take()) { - (Some(_), Some(r)) => Some(r), - (None, r) => r, - (l, None) => l, - } - } - - self.cipher = combine(self.cipher.take(), rhs.cipher); - self.compression = combine(self.compression.take(), rhs.compression); - } -} - /// Trait to encode a type `T` into a [`Value`]. pub trait Serialize { type Output; From f98436abb66febfb342823876973322106c5ded1 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 22 Apr 2024 18:09:35 +0200 Subject: [PATCH 278/357] merge protocol changes followup --- zenoh/src/api/scouting.rs | 2 ++ zenoh/src/api/session.rs | 27 +++++++++++++++++++++++++-- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/zenoh/src/api/scouting.rs b/zenoh/src/api/scouting.rs index 2b0022f242..a3c86655b6 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -351,6 +351,7 @@ fn _scout( /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; +/// use zenoh::scouting::WhatAmI; /// /// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) /// .res() @@ -376,3 +377,4 @@ where handler: DefaultHandler::default(), } } + diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index d00ae0a532..c481b01bdf 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -2664,7 +2664,11 @@ where TryIntoConfig: std::convert::TryInto + Send + 'static, >::Error: std::fmt::Debug, { - OpenBuilder { config } + OpenBuilder { + config, + #[cfg(all(feature = "unstable", feature = "shared-memory"))] + shm_clients: None, + } } /// A builder returned by [`open`] used to open a zenoh [`Session`]. 
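// Illustrative sketch, not part of this patch series: a minimal round-trip
// through the reworked `ZBytes` (de)serialization API from the bytes.rs hunks
// above. Only conversions visible in those hunks are used (`From<&str>`,
// `From<&[u8]>`, `deserialize::<T>()`); error handling is reduced to unwrap()
// for brevity.
use zenoh::bytes::ZBytes;

fn zbytes_round_trip() {
    // A &str is serialized into a ZBytes via its From impl...
    let payload = ZBytes::from("hello zenoh");
    // ...and read back with deserialize::<T>(), which returns a ZResult.
    let text = payload.deserialize::<String>().unwrap();
    assert_eq!(text, "hello zenoh");

    // Raw bytes go through the &[u8] / Vec<u8> impls in the same way.
    let raw = ZBytes::from(&[1u8, 2, 3][..]);
    let bytes: Vec<u8> = raw.deserialize::<Vec<u8>>().unwrap();
    assert_eq!(bytes, vec![1u8, 2, 3]);
}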
@@ -2685,6 +2689,20 @@ where >::Error: std::fmt::Debug, { config: TryIntoConfig, + #[cfg(all(feature = "unstable", feature = "shared-memory"))] + shm_clients: Option>, +} + +#[cfg(all(feature = "unstable", feature = "shared-memory"))] +impl OpenBuilder +where + TryIntoConfig: std::convert::TryInto + Send + 'static, + >::Error: std::fmt::Debug, +{ + pub fn with_shm_clients(mut self, shm_clients: Arc) -> Self { + self.shm_clients = Some(shm_clients); + self + } } impl Resolvable for OpenBuilder @@ -2705,7 +2723,12 @@ where .config .try_into() .map_err(|e| zerror!("Invalid Zenoh configuration {:?}", &e))?; - Session::new(config).res_sync() + Session::new( + config, + #[cfg(all(feature = "unstable", feature = "shared-memory"))] + self.shm_clients, + ) + .res_sync() } } From 420e38b1335840c35662ed01bf91ffec9ddbdc46 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 23 Apr 2024 09:36:14 +0200 Subject: [PATCH 279/357] build fixes --- zenoh/src/api/loader.rs | 2 +- zenoh/src/api/mod.rs | 3 +++ zenoh/src/lib.rs | 3 ++- zenoh/src/net/runtime/adminspace.rs | 13 ++++--------- zenoh/src/net/runtime/mod.rs | 9 +++++---- zenohd/src/main.rs | 7 +++---- 6 files changed, 18 insertions(+), 19 deletions(-) diff --git a/zenoh/src/api/loader.rs b/zenoh/src/api/loader.rs index 084bae82b7..e4a28de02e 100644 --- a/zenoh/src/api/loader.rs +++ b/zenoh/src/api/loader.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::sealed::{PluginsManager, PLUGIN_PREFIX}; +use super::plugins::{PluginsManager, PLUGIN_PREFIX}; use crate::runtime::Runtime; use zenoh_config::{Config, PluginLoad}; use zenoh_result::ZResult; diff --git a/zenoh/src/api/mod.rs b/zenoh/src/api/mod.rs index c2cc3504f0..e93a5e025c 100644 --- a/zenoh/src/api/mod.rs +++ b/zenoh/src/api/mod.rs @@ -23,6 +23,7 @@ pub(crate) mod info; pub(crate) mod key_expr; #[cfg(feature = "unstable")] pub(crate) mod liveliness; +#[cfg(all(feature = "unstable", feature = "plugins"))] pub(crate) mod plugins; pub(crate) mod publication; pub(crate) mod query; @@ -34,3 +35,5 @@ pub(crate) mod session; pub(crate) mod subscriber; pub(crate) mod time; pub(crate) mod value; +#[cfg(all(feature = "unstable", feature = "plugins"))] +pub(crate) mod loader; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index de57f195e6..bf6675f63b 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -82,7 +82,6 @@ extern crate zenoh_result; mod api; mod net; - #[cfg(all(feature = "unstable", feature = "shared-memory"))] pub use zenoh_shm::api as shm; #[cfg(all(feature = "unstable", feature = "shared-memory"))] @@ -329,6 +328,7 @@ pub mod time { /// This operation is used by the plugins to share the same Runtime as the router. 
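// Illustrative sketch, not part of this patch series: how the `RuntimeBuilder`
// re-exported in the `runtime` module below might be used by plugins or
// embedding code. It relies only on the builder introduced earlier in this
// series; `Config` construction and error handling are simplified.
use zenoh::config::Config;
use zenoh::runtime::Runtime;

async fn build_shared_runtime(config: Config) -> Runtime {
    // Runtime::builder().build(config) is equivalent to Runtime::new(config),
    // but the builder form leaves room for options such as `.shm_clients(...)`
    // when the `unstable` and `shared-memory` features are enabled.
    Runtime::builder().build(config).await.unwrap()
}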
#[doc(hidden)] pub mod runtime { + pub use crate::net::runtime::RuntimeBuilder; pub use crate::net::runtime::{AdminSpace, Runtime}; pub use zenoh_runtime::ZRuntime; } @@ -343,6 +343,7 @@ pub mod config { } #[doc(hidden)] +#[cfg(all(feature = "unstable", feature = "plugins"))] pub mod plugins { pub use crate::api::plugins::PluginsManager; pub use crate::api::plugins::Response; diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 64c1a4cae1..b35d81a81a 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -15,19 +15,13 @@ use super::Runtime; use crate::api::builders::sample::ValueBuilderTrait; use crate::api::bytes::ZBytes; use crate::api::key_expr::KeyExpr; -use crate::api::plugins; +#[cfg(all(feature = "unstable", feature = "plugins"))] +use crate::api::plugins::PluginsManager; use crate::api::queryable::Query; use crate::api::queryable::QueryInner; use crate::api::value::Value; use crate::encoding::Encoding; use crate::net::primitives::Primitives; -#[cfg(all(feature = "unstable", feature = "plugins"))] -use crate::plugins::sealed::{self as plugins}; -use crate::prelude::sync::SyncResolve; -use crate::queryable::Query; -use crate::queryable::QueryInner; -use crate::sample::builder::ValueBuilderTrait; -use crate::value::Value; use serde_json::json; use std::collections::HashMap; use std::convert::TryFrom; @@ -37,6 +31,7 @@ use std::sync::Mutex; use tracing::{error, trace}; use zenoh_buffers::buffer::SplitBuffer; use zenoh_config::{unwrap_or_default, ConfigValidator, ValidatedMap, WhatAmI}; +use zenoh_core::SyncResolve; #[cfg(all(feature = "unstable", feature = "plugins"))] use zenoh_plugin_trait::{PluginControl, PluginStatus}; #[cfg(all(feature = "unstable", feature = "plugins"))] @@ -109,7 +104,7 @@ impl ConfigValidator for AdminSpace { impl AdminSpace { #[cfg(all(feature = "unstable", feature = "plugins"))] fn start_plugin( - plugin_mgr: &mut plugins::PluginsManager, + plugin_mgr: &mut PluginsManager, config: &zenoh_config::PluginLoad, start_args: &Runtime, required: bool, diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 06847b9ceb..456899f918 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -23,9 +23,11 @@ pub mod orchestrator; use super::primitives::DeMux; use super::routing; use super::routing::router::Router; +#[cfg(all(feature = "unstable", feature = "plugins"))] +use crate::api::loader::{load_plugins, start_plugins}; use crate::config::{unwrap_or_default, Config, ModeDependent, Notifier}; #[cfg(all(feature = "unstable", feature = "plugins"))] -use crate::plugins::sealed::PluginsManager; +use crate::api::plugins::PluginsManager; use crate::{GIT_VERSION, LONG_VERSION}; pub use adminspace::AdminSpace; use futures::stream::StreamExt; @@ -39,7 +41,6 @@ use std::time::Duration; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use uhlc::{HLCBuilder, HLC}; -use zenoh_config::{unwrap_or_default, Config, ModeDependent, Notifier}; use zenoh_link::{EndPoint, Link}; use zenoh_plugin_trait::{PluginStartArgs, StructVersion}; use zenoh_protocol::core::{Locator, WhatAmI, ZenohId}; @@ -158,7 +159,7 @@ impl RuntimeBuilder { #[cfg(all(feature = "unstable", feature = "plugins"))] let plugins_manager = plugins_manager .take() - .unwrap_or_else(|| crate::plugins::loader::load_plugins(&config)); + .unwrap_or_else(|| load_plugins(&config)); // Admin space creation flag let start_admin_space = *config.adminspace.enabled(); @@ -185,7 +186,7 @@ impl 
RuntimeBuilder { // Start plugins #[cfg(all(feature = "unstable", feature = "plugins"))] - crate::plugins::loader::start_plugins(&runtime); + start_plugins(&runtime); // Start notifier task let receiver = config.subscribe(); diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 9480f99223..3629e4dae4 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -18,11 +18,10 @@ use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::EnvFilter; use zenoh::config::EndPoint; -use zenoh::config::{Config, ModeDependentValue, PermissionsConf, PluginLoad, ValidatedMap}; +use zenoh::config::{Config, ModeDependentValue, PermissionsConf, ValidatedMap}; use zenoh::core::Result; -use zenoh::plugins::PluginsManager; -use zenoh::runtime::{AdminSpace, Runtime}; use zenoh::scouting::WhatAmI; +use zenoh::core::AsyncResolve; #[cfg(feature = "loki")] use url::Url; @@ -108,7 +107,7 @@ fn main() { let config = config_from_args(&args); tracing::info!("Initial conf: {}", &config); - let _session = match zenoh::open(config).res().await { + let _session = match zenoh::open(config).res_async().await { Ok(runtime) => runtime, Err(e) => { println!("{e}. Exiting..."); From a9a906f7bcad34b2f62353ef5aee2c210e2748e7 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 23 Apr 2024 09:44:02 +0200 Subject: [PATCH 280/357] cargo fmt --- zenoh/src/api/mod.rs | 4 ++-- zenoh/src/api/scouting.rs | 1 - zenoh/src/net/runtime/mod.rs | 2 +- zenohd/src/main.rs | 2 +- 4 files changed, 4 insertions(+), 5 deletions(-) diff --git a/zenoh/src/api/mod.rs b/zenoh/src/api/mod.rs index e93a5e025c..694890ad6c 100644 --- a/zenoh/src/api/mod.rs +++ b/zenoh/src/api/mod.rs @@ -24,6 +24,8 @@ pub(crate) mod key_expr; #[cfg(feature = "unstable")] pub(crate) mod liveliness; #[cfg(all(feature = "unstable", feature = "plugins"))] +pub(crate) mod loader; +#[cfg(all(feature = "unstable", feature = "plugins"))] pub(crate) mod plugins; pub(crate) mod publication; pub(crate) mod query; @@ -35,5 +37,3 @@ pub(crate) mod session; pub(crate) mod subscriber; pub(crate) mod time; pub(crate) mod value; -#[cfg(all(feature = "unstable", feature = "plugins"))] -pub(crate) mod loader; diff --git a/zenoh/src/api/scouting.rs b/zenoh/src/api/scouting.rs index a3c86655b6..c4e411dec9 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -377,4 +377,3 @@ where handler: DefaultHandler::default(), } } - diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 456899f918..4991844650 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -25,9 +25,9 @@ use super::routing; use super::routing::router::Router; #[cfg(all(feature = "unstable", feature = "plugins"))] use crate::api::loader::{load_plugins, start_plugins}; -use crate::config::{unwrap_or_default, Config, ModeDependent, Notifier}; #[cfg(all(feature = "unstable", feature = "plugins"))] use crate::api::plugins::PluginsManager; +use crate::config::{unwrap_or_default, Config, ModeDependent, Notifier}; use crate::{GIT_VERSION, LONG_VERSION}; pub use adminspace::AdminSpace; use futures::stream::StreamExt; diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 3629e4dae4..d8fed7eeb4 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -19,9 +19,9 @@ use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::EnvFilter; use zenoh::config::EndPoint; use zenoh::config::{Config, ModeDependentValue, PermissionsConf, ValidatedMap}; +use zenoh::core::AsyncResolve; use 
zenoh::core::Result; use zenoh::scouting::WhatAmI; -use zenoh::core::AsyncResolve; #[cfg(feature = "loki")] use url::Url; From 618bed1ff9c2ef17cf73860182b6849b68f06fb2 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 23 Apr 2024 09:57:24 +0200 Subject: [PATCH 281/357] restored zenoh-macro for zenoh-ext --- Cargo.lock | 1 + zenoh-ext/Cargo.toml | 1 + zenoh-ext/src/publication_cache.rs | 2 +- zenoh-ext/src/querying_subscriber.rs | 4 ++-- zenoh/src/lib.rs | 1 - 5 files changed, 5 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4244273f87..737cb62f75 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5249,6 +5249,7 @@ dependencies = [ "tokio", "tracing", "zenoh", + "zenoh-macros", "zenoh-util", ] diff --git a/zenoh-ext/Cargo.toml b/zenoh-ext/Cargo.toml index 61b0bf13df..402d37e5f4 100644 --- a/zenoh-ext/Cargo.toml +++ b/zenoh-ext/Cargo.toml @@ -51,6 +51,7 @@ serde = { workspace = true, features = ["default"] } serde_cbor = { workspace = true } serde_json = { workspace = true } zenoh = { workspace = true, features = ["unstable"], default-features = false } +zenoh-macros = { workspace = true } [package.metadata.docs.rs] features = ["unstable"] diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index b6a380d766..7080b44ac4 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -66,7 +66,7 @@ impl<'a, 'b, 'c> PublicationCacheBuilder<'a, 'b, 'c> { /// Restrict the matching queries that will be receive by this [`PublicationCache`]'s queryable /// to the ones that have the given [`Locality`](zenoh::prelude::Locality). - #[zenoh::internal::unstable] + #[zenoh_macros::unstable] #[inline] pub fn queryable_allowed_origin(mut self, origin: Locality) -> Self { self.queryable_origin = Some(origin); diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 1bce18a64f..35eb9afe46 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -163,7 +163,7 @@ impl<'a, 'b, Handler> QueryingSubscriberBuilder<'a, 'b, crate::UserSpace, Handle /// Restrict the matching publications that will be receive by this [`Subscriber`] /// to the ones that have the given [`Locality`](zenoh::prelude::Locality). - #[zenoh::internal::unstable] + #[zenoh_macros::unstable] #[inline] pub fn allowed_origin(mut self, origin: Locality) -> Self { self.origin = origin; @@ -523,7 +523,7 @@ where /// Restrict the matching publications that will be receive by this [`FetchingSubscriber`] /// to the ones that have the given [`Locality`](zenoh::prelude::Locality). 
- #[zenoh::internal::unstable] + #[zenoh_macros::unstable] #[inline] pub fn allowed_origin(mut self, origin: Locality) -> Self { self.origin = origin; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index bf6675f63b..47c95f2d52 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -357,7 +357,6 @@ pub mod internal { pub use zenoh_core::zerror; pub use zenoh_core::zlock; pub use zenoh_core::ztimeout; - pub use zenoh_macros::unstable; pub use zenoh_result::bail; pub use zenoh_sync::Condition; pub use zenoh_task::TaskController; From d10308568c7fb0444ea3412259bf344cafd0f8dc Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Tue, 23 Apr 2024 11:51:10 +0200 Subject: [PATCH 282/357] shm clippy still fails --- zenoh/src/lib.rs | 21 ++++++++++++++------- zenoh/tests/payload.rs | 13 +------------ zenoh/tests/shm.rs | 7 +------ 3 files changed, 16 insertions(+), 25 deletions(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 47c95f2d52..c4f671ec79 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -82,11 +82,6 @@ extern crate zenoh_result; mod api; mod net; -#[cfg(all(feature = "unstable", feature = "shared-memory"))] -pub use zenoh_shm::api as shm; -#[cfg(all(feature = "unstable", feature = "shared-memory"))] -pub use zenoh_shm::api::client_storage::SharedMemoryClientStorage; - lazy_static::lazy_static!( static ref LONG_VERSION: String = format!("{} built with {}", GIT_VERSION, env!("RUSTC_VERSION")); ); @@ -366,7 +361,19 @@ pub mod internal { pub use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer, ZENOH_HOME_ENV_VAR}; } -#[cfg(feature = "shared-memory")] +#[cfg(all(feature = "unstable", feature = "shared-memory"))] pub mod shm { - pub use zenoh_shm::SharedMemoryManager; + pub use zenoh_shm::api::client_storage::SharedMemoryClientStorage; + pub use zenoh_shm::api::slice::zsliceshm::{zsliceshm, ZSliceShm}; + pub use zenoh_shm::api::slice::zsliceshmmut::{zsliceshmmut, ZSliceShmMut}; + pub use zenoh_shm::api::{ + protocol_implementations::posix::{ + posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, + protocol_id::POSIX_PROTOCOL_ID, + }, + provider::shared_memory_provider::SharedMemoryProviderBuilder, + }; + pub use zenoh_shm::api::provider::shared_memory_provider::{ + BlockOn, GarbageCollect, + }; } diff --git a/zenoh/tests/payload.rs b/zenoh/tests/payload.rs index d9910bedf5..fac5d37367 100644 --- a/zenoh/tests/payload.rs +++ b/zenoh/tests/payload.rs @@ -15,18 +15,7 @@ #[test] #[cfg(all(feature = "shared-memory", feature = "unstable"))] fn shm_payload_single_buf() { - use zenoh::shm::slice::zsliceshm::{zsliceshm, ZSliceShm}; - use zenoh::shm::slice::zsliceshmmut::{zsliceshmmut, ZSliceShmMut}; - use zenoh::{ - bytes::ZBytes, - shm::{ - protocol_implementations::posix::{ - posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, - protocol_id::POSIX_PROTOCOL_ID, - }, - provider::shared_memory_provider::SharedMemoryProviderBuilder, - }, - }; + use zenoh::prelude::r#async::*; // create an SHM backend... 
let backend = PosixSharedMemoryProviderBackend::builder() diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs index 2a9685eb36..92d1b17732 100644 --- a/zenoh/tests/shm.rs +++ b/zenoh/tests/shm.rs @@ -17,12 +17,7 @@ mod tests { use std::sync::Arc; use std::time::Duration; use zenoh::prelude::r#async::*; - use zenoh::shm::protocol_implementations::posix::posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend; - use zenoh::shm::protocol_implementations::posix::protocol_id::POSIX_PROTOCOL_ID; - use zenoh::shm::provider::shared_memory_provider::{ - BlockOn, GarbageCollect, SharedMemoryProviderBuilder, - }; - use zenoh_core::ztimeout; + use zenoh::internal::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); From 3d6a6e8c1d0334be8c1d3ef16a87095f6bebdb83 Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Tue, 23 Apr 2024 14:06:00 +0300 Subject: [PATCH 283/357] [skip ci] SHM Payload API example and test --- examples/Cargo.toml | 7 +- examples/examples/z_payload_shm.rs | 101 +++++++++++++++++++++++++++++ zenoh/tests/payload.rs | 51 +++++---------- 3 files changed, 124 insertions(+), 35 deletions(-) create mode 100644 examples/examples/z_payload_shm.rs diff --git a/examples/Cargo.toml b/examples/Cargo.toml index e117507ae9..b240d06723 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -156,4 +156,9 @@ path = "examples/z_pong.rs" [[example]] name = "z_alloc_shm" path = "examples/z_alloc_shm.rs" -required-features = ["unstable", "shared-memory"] \ No newline at end of file +required-features = ["unstable", "shared-memory"] + +[[example]] +name = "z_payload_shm" +path = "examples/z_payload_shm.rs" +required-features = ["unstable", "shared-memory"] diff --git a/examples/examples/z_payload_shm.rs b/examples/examples/z_payload_shm.rs new file mode 100644 index 0000000000..3b03b80502 --- /dev/null +++ b/examples/examples/z_payload_shm.rs @@ -0,0 +1,101 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use zenoh::shm::slice::zsliceshm::{zsliceshm, ZSliceShm}; +use zenoh::shm::slice::zsliceshmmut::{zsliceshmmut, ZSliceShmMut}; +use zenoh::{ + bytes::ZBytes, + shm::{ + protocol_implementations::posix::{ + posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, + protocol_id::POSIX_PROTOCOL_ID, + }, + provider::shared_memory_provider::SharedMemoryProviderBuilder, + }, +}; + +fn main() { + // create an SHM backend... 
+ let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(4096) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + // Prepare a layout for allocations + let layout = provider.alloc_layout().size(1024).res().unwrap(); + + // allocate an SHM buffer (ZSliceShmMut) + let mut owned_shm_buf_mut = layout.alloc().res().unwrap(); + + // mutable and immutable API + let _data: &[u8] = &owned_shm_buf_mut; + let _data_mut: &mut [u8] = &mut owned_shm_buf_mut; + + // convert into immutable owned buffer (ZSliceShmMut -> ZSlceShm) + let owned_shm_buf: ZSliceShm = owned_shm_buf_mut.into(); + + // immutable API + let _data: &[u8] = &owned_shm_buf; + + // convert again into mutable owned buffer (ZSliceShm -> ZSlceShmMut) + let mut owned_shm_buf_mut: ZSliceShmMut = owned_shm_buf.try_into().unwrap(); + + // mutable and immutable API + let _data: &[u8] = &owned_shm_buf_mut; + let _data_mut: &mut [u8] = &mut owned_shm_buf_mut; + + // build a ZBytes from an SHM buffer (ZSliceShmMut -> ZBytes) + let mut payload: ZBytes = owned_shm_buf_mut.into(); + + // branch to illustrate immutable access to SHM data + { + // deserialize ZBytes as an immutably borrowed zsliceshm (ZBytes -> &zsliceshm) + let borrowed_shm_buf: &zsliceshm = payload.deserialize().unwrap(); + + // immutable API + let _data: &[u8] = borrowed_shm_buf; + + // construct owned buffer from borrowed type (&zsliceshm -> ZSliceShm) + let owned = borrowed_shm_buf.to_owned(); + + // immutable API + let _data: &[u8] = &owned; + + // try to construct mutable ZSliceShmMut (ZSliceShm -> ZSliceShmMut) + let owned_mut: Result = owned.try_into(); + // the attempt fails because ZSliceShm has two existing references ('owned' and inside 'payload') + assert!(owned_mut.is_err()) + } + + // branch to illustrate mutable access to SHM data + { + // deserialize ZBytes as mutably borrowed zsliceshm (ZBytes -> &mut zsliceshm) + let borrowed_shm_buf: &mut zsliceshm = payload.deserialize_mut().unwrap(); + + // immutable API + let _data: &[u8] = borrowed_shm_buf; + + // convert zsliceshm to zsliceshmmut (&mut zsliceshm -> &mut zsliceshmmut) + let borrowed_shm_buf_mut: &mut zsliceshmmut = borrowed_shm_buf.try_into().unwrap(); + + // mutable and immutable API + let _data: &[u8] = borrowed_shm_buf_mut; + let _data_mut: &mut [u8] = borrowed_shm_buf_mut; + } +} diff --git a/zenoh/tests/payload.rs b/zenoh/tests/payload.rs index d9910bedf5..1bcbf33ef4 100644 --- a/zenoh/tests/payload.rs +++ b/zenoh/tests/payload.rs @@ -43,55 +43,38 @@ fn shm_payload_single_buf() { // Prepare a layout for allocations let layout = provider.alloc_layout().size(1024).res().unwrap(); - // allocate an SHM buffer - let mut owned_shm_buf_mut = layout.alloc().res().unwrap(); + // allocate an SHM buffer (ZSliceShmMut) + let owned_shm_buf_mut = layout.alloc().res().unwrap(); - // get data - let _data: &[u8] = &owned_shm_buf_mut; - let _data_mut: &mut [u8] = &mut owned_shm_buf_mut; - - // convert into immutable owned buffer + // convert into immutable owned buffer (ZSliceShmMut -> ZSlceShm) let owned_shm_buf: ZSliceShm = owned_shm_buf_mut.into(); - // get data - let _data: &[u8] = &owned_shm_buf; - - // convert again into mutable owned buffer - let mut owned_shm_buf_mut: ZSliceShmMut = owned_shm_buf.try_into().unwrap(); - - // get data - let _data: &[u8] = &owned_shm_buf_mut; - let _data_mut: &mut [u8] = &mut owned_shm_buf_mut; + // convert again into mutable owned buffer (ZSliceShm -> 
ZSlceShmMut) + let owned_shm_buf_mut: ZSliceShmMut = owned_shm_buf.try_into().unwrap(); - // build a ZBytes from an SHM buffer + // build a ZBytes from an SHM buffer (ZSliceShmMut -> ZBytes) let mut payload: ZBytes = owned_shm_buf_mut.into(); + // branch to illustrate immutable access to SHM data { - // deserialize ZBytes as borrowed zsliceshm + // deserialize ZBytes as an immutably borrowed zsliceshm (ZBytes -> &zsliceshm) let borrowed_shm_buf: &zsliceshm = payload.deserialize().unwrap(); - // get data - let _data: &[u8] = borrowed_shm_buf; - - // construct owned buffer from borrowed type + // construct owned buffer from borrowed type (&zsliceshm -> ZSliceShm) let owned = borrowed_shm_buf.to_owned(); - // get data - let _data: &[u8] = &owned; + // try to construct mutable ZSliceShmMut (ZSliceShm -> ZSliceShmMut) + let owned_mut: Result = owned.try_into(); + // the attempt fails because ZSliceShm has two existing references ('owned' and inside 'payload') + assert!(owned_mut.is_err()) } + // branch to illustrate mutable access to SHM data { - // deserialize ZBytes as mutably borrowed zsliceshm + // deserialize ZBytes as mutably borrowed zsliceshm (ZBytes -> &mut zsliceshm) let borrowed_shm_buf: &mut zsliceshm = payload.deserialize_mut().unwrap(); - // get data - let _data: &[u8] = borrowed_shm_buf; - - // convert zsliceshm to zsliceshmmut - let borrowed_shm_buf_mut: &mut zsliceshmmut = borrowed_shm_buf.try_into().unwrap(); - - // get data - let _data: &[u8] = borrowed_shm_buf_mut; - let _data_mut: &mut [u8] = borrowed_shm_buf_mut; + // convert zsliceshm to zsliceshmmut (&mut zsliceshm -> &mut zsliceshmmut) + let _borrowed_shm_buf_mut: &mut zsliceshmmut = borrowed_shm_buf.try_into().unwrap(); } } From d9b65a73eb79230c2fb4020f5c428b2d41d111f9 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 23 Apr 2024 14:36:06 +0200 Subject: [PATCH 284/357] Fix tuple deserialization lifetime (#954) --- zenoh/src/bytes.rs | 53 ++++++++++++++++++++++++++++++---------------- 1 file changed, 35 insertions(+), 18 deletions(-) diff --git a/zenoh/src/bytes.rs b/zenoh/src/bytes.rs index fb4e3a19e9..c36136ef81 100644 --- a/zenoh/src/bytes.rs +++ b/zenoh/src/bytes.rs @@ -1694,10 +1694,10 @@ where impl<'s, A, B> Deserialize<'s, (A, B)> for ZSerde where - for<'a> A: TryFrom<&'a ZBytes>, - for<'a> >::Error: Debug, - for<'b> B: TryFrom<&'b ZBytes>, - for<'b> >::Error: Debug, + A: TryFrom + 'static, + >::Error: Debug + 'static, + B: TryFrom + 'static, + >::Error: Debug + 'static, { type Input = &'s ZBytes; type Error = ZError; @@ -1712,18 +1712,18 @@ where let bbuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; let bpld = ZBytes::new(bbuf); - let a = A::try_from(&apld).map_err(|e| zerror!("{:?}", e))?; - let b = B::try_from(&bpld).map_err(|e| zerror!("{:?}", e))?; + let a = A::try_from(apld).map_err(|e| zerror!("{:?}", e))?; + let b = B::try_from(bpld).map_err(|e| zerror!("{:?}", e))?; Ok((a, b)) } } impl TryFrom for (A, B) where - A: for<'a> TryFrom<&'a ZBytes>, - for<'a> >::Error: Debug, - for<'b> B: TryFrom<&'b ZBytes>, - for<'b> >::Error: Debug, + A: TryFrom + 'static, + >::Error: Debug + 'static, + B: TryFrom + 'static, + >::Error: Debug + 'static, { type Error = ZError; @@ -1734,10 +1734,10 @@ where impl TryFrom<&ZBytes> for (A, B) where - for<'a> A: TryFrom<&'a ZBytes>, - for<'a> >::Error: Debug, - for<'b> B: TryFrom<&'b ZBytes>, - for<'b> >::Error: Debug, + A: TryFrom + 'static, + >::Error: Debug + 'static, + B: TryFrom + 'static, + >::Error: Debug + 'static, { type Error = ZError; @@ 
-1748,10 +1748,10 @@ where impl TryFrom<&mut ZBytes> for (A, B) where - for<'a> A: TryFrom<&'a ZBytes>, - for<'a> >::Error: Debug, - for<'b> B: TryFrom<&'b ZBytes>, - for<'b> >::Error: Debug, + A: TryFrom + 'static, + >::Error: Debug + 'static, + B: TryFrom + 'static, + >::Error: Debug + 'static, { type Error = ZError; @@ -1977,6 +1977,14 @@ mod tests { serialize_deserialize!((usize, usize), (0, 1)); serialize_deserialize!((usize, String), (0, String::from("a"))); serialize_deserialize!((String, String), (String::from("a"), String::from("b"))); + serialize_deserialize!( + (Cow<'static, [u8]>, Cow<'static, [u8]>), + (Cow::from(vec![0u8; 8]), Cow::from(vec![0u8; 8])) + ); + serialize_deserialize!( + (Cow<'static, str>, Cow<'static, str>), + (Cow::from("a"), Cow::from("b")) + ); // Iterator let v: [usize; 5] = [0, 1, 2, 3, 4]; @@ -2061,5 +2069,14 @@ mod tests { println!("Deserialize:\t{:?}\n", p); let o = HashMap::from_iter(p.iter::<(String, String)>()); assert_eq!(hm, o); + + let mut hm: HashMap, Cow<'static, str>> = HashMap::new(); + hm.insert(Cow::from("0"), Cow::from("a")); + hm.insert(Cow::from("1"), Cow::from("b")); + println!("Serialize:\t{:?}", hm); + let p = ZBytes::from_iter(hm.iter()); + println!("Deserialize:\t{:?}\n", p); + let o = HashMap::from_iter(p.iter::<(Cow<'static, str>, Cow<'static, str>)>()); + assert_eq!(hm, o); } } From da5a1a01edaade37228e80342bb69b48ce362735 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 24 Apr 2024 18:39:36 +0200 Subject: [PATCH 285/357] cargo fmt --- zenoh/src/lib.rs | 4 +--- zenoh/tests/shm.rs | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index c4f671ec79..60dab218db 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -364,6 +364,7 @@ pub mod internal { #[cfg(all(feature = "unstable", feature = "shared-memory"))] pub mod shm { pub use zenoh_shm::api::client_storage::SharedMemoryClientStorage; + pub use zenoh_shm::api::provider::shared_memory_provider::{BlockOn, GarbageCollect}; pub use zenoh_shm::api::slice::zsliceshm::{zsliceshm, ZSliceShm}; pub use zenoh_shm::api::slice::zsliceshmmut::{zsliceshmmut, ZSliceShmMut}; pub use zenoh_shm::api::{ @@ -373,7 +374,4 @@ pub mod shm { }, provider::shared_memory_provider::SharedMemoryProviderBuilder, }; - pub use zenoh_shm::api::provider::shared_memory_provider::{ - BlockOn, GarbageCollect, - }; } diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs index 92d1b17732..a7bc481e27 100644 --- a/zenoh/tests/shm.rs +++ b/zenoh/tests/shm.rs @@ -16,8 +16,8 @@ mod tests { use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; - use zenoh::prelude::r#async::*; use zenoh::internal::ztimeout; + use zenoh::prelude::r#async::*; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); From 6182ae1b710ded6ff837de6c14bac567d25ffee3 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Wed, 24 Apr 2024 18:54:59 +0200 Subject: [PATCH 286/357] test fix --- .config/nextest.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.config/nextest.toml b/.config/nextest.toml index b2ed4cde98..4999dce0d3 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -16,7 +16,7 @@ test(=three_node_combination) | test(=watchdog_alloc_concurrent) | test(=header_check_memory_concurrent) | test(=header_link_concurrent) | -test(=header_link_failure_concurrent) +test(=header_link_failure_concurrent) | test(=downsampling_by_keyexpr) """ threads-required = 'num-cpus' From 
dd6720df4c8a29140baf65f8a603a059dbcc1e2c Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 25 Apr 2024 12:30:20 +0200 Subject: [PATCH 287/357] restored lost bytes.rs --- zenoh/src/api/bytes.rs | 817 +++++++++++++++++++++++++++++++++-------- 1 file changed, 664 insertions(+), 153 deletions(-) diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index 6f8ba23a65..c36136ef81 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -28,8 +28,14 @@ use zenoh_buffers::{ use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_protocol::{core::Properties, zenoh::ext::AttachmentType}; use zenoh_result::{ZError, ZResult}; -#[cfg(feature = "shared-memory")] -use zenoh_shm::SharedMemoryBuf; +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +use zenoh_shm::{ + api::slice::{ + zsliceshm::{zsliceshm, ZSliceShm}, + zsliceshmmut::{zsliceshmmut, ZSliceShmMut}, + }, + SharedMemoryBuf, +}; /// Trait to encode a type `T` into a [`Value`]. pub trait Serialize { @@ -40,10 +46,11 @@ pub trait Serialize { } pub trait Deserialize<'a, T> { + type Input: 'a; type Error; /// The implementer should take care of deserializing the type `T` based on the [`Encoding`] information. - fn deserialize(self, t: &'a ZBytes) -> Result; + fn deserialize(self, t: Self::Input) -> Result; } /// ZBytes contains the serialized bytes of user data. @@ -128,7 +135,18 @@ impl ZBytes { /// Deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. pub fn deserialize<'a, T>(&'a self) -> ZResult where - ZSerde: Deserialize<'a, T>, + ZSerde: Deserialize<'a, T, Input = &'a ZBytes>, + >::Error: Debug, + { + ZSerde + .deserialize(self) + .map_err(|e| zerror!("{:?}", e).into()) + } + + /// Deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. + pub fn deserialize_mut<'a, T>(&'a mut self) -> ZResult + where + ZSerde: Deserialize<'a, T, Input = &'a mut ZBytes>, >::Error: Debug, { ZSerde @@ -139,7 +157,16 @@ impl ZBytes { /// Infallibly deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. pub fn into<'a, T>(&'a self) -> T where - ZSerde: Deserialize<'a, T, Error = Infallible>, + ZSerde: Deserialize<'a, T, Input = &'a ZBytes, Error = Infallible>, + >::Error: Debug, + { + ZSerde.deserialize(self).unwrap_infallible() + } + + /// Infallibly deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. 
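A quick orientation for the restored conversions in this file: `deserialize` drives the impls whose `Input` is `&ZBytes`, while the new `deserialize_mut` drives the `&mut ZBytes` impls (in this file, only the SHM borrow types further down). A small hedged sketch of the plain round trips, assuming the owned `From` impls and the `zenoh::bytes::ZBytes` path match the surrounding diff:

    use zenoh::bytes::ZBytes;

    fn zbytes_roundtrips() {
        // &str in, String out (From<&str> plus the Deserialize<String> impl).
        let bytes = ZBytes::from("hello");
        let s: String = bytes.deserialize().unwrap();
        assert_eq!(s, "hello");

        // Fixed-size array in and out (the [u8; N] impls).
        let bytes = ZBytes::from([1u8, 2, 3, 4]);
        let arr: [u8; 4] = bytes.deserialize().unwrap();
        assert_eq!(arr, [1u8, 2, 3, 4]);
    }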
+ pub fn into_mut<'a, T>(&'a mut self) -> T + where + ZSerde: Deserialize<'a, T, Input = &'a mut ZBytes, Error = Infallible>, >::Error: Debug, { ZSerde.deserialize(self).unwrap_infallible() @@ -192,7 +219,7 @@ where impl Iterator for ZBytesIterator<'_, T> where - for<'a> ZSerde: Deserialize<'a, T>, + for<'a> ZSerde: Deserialize<'a, T, Input = &'a ZBytes>, for<'a> >::Error: Debug, { type Item = T; @@ -311,10 +338,25 @@ impl From<&ZBuf> for ZBytes { } } -impl Deserialize<'_, ZBuf> for ZSerde { +impl Serialize<&mut ZBuf> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut ZBuf) -> Self::Output { + ZBytes::new(t.clone()) + } +} + +impl From<&mut ZBuf> for ZBytes { + fn from(t: &mut ZBuf) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Deserialize<'a, ZBuf> for ZSerde { + type Input = &'a ZBytes; type Error = Infallible; - fn deserialize(self, v: &ZBytes) -> Result { + fn deserialize(self, v: Self::Input) -> Result { Ok(v.0.clone()) } } @@ -331,6 +373,12 @@ impl From<&ZBytes> for ZBuf { } } +impl From<&mut ZBytes> for ZBuf { + fn from(value: &mut ZBytes) -> Self { + ZSerde.deserialize(&*value).unwrap_infallible() + } +} + // ZSlice impl Serialize for ZSerde { type Output = ZBytes; @@ -360,10 +408,25 @@ impl From<&ZSlice> for ZBytes { } } -impl Deserialize<'_, ZSlice> for ZSerde { +impl Serialize<&mut ZSlice> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut ZSlice) -> Self::Output { + ZBytes::new(t.clone()) + } +} + +impl From<&mut ZSlice> for ZBytes { + fn from(t: &mut ZSlice) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Deserialize<'a, ZSlice> for ZSerde { + type Input = &'a ZBytes; type Error = Infallible; - fn deserialize(self, v: &ZBytes) -> Result { + fn deserialize(self, v: Self::Input) -> Result { Ok(v.0.to_zslice()) } } @@ -380,6 +443,12 @@ impl From<&ZBytes> for ZSlice { } } +impl From<&mut ZBytes> for ZSlice { + fn from(value: &mut ZBytes) -> Self { + ZSerde.deserialize(&*value).unwrap_infallible() + } +} + // [u8; N] impl Serialize<[u8; N]> for ZSerde { type Output = ZBytes; @@ -409,10 +478,25 @@ impl From<&[u8; N]> for ZBytes { } } -impl Deserialize<'_, [u8; N]> for ZSerde { +impl Serialize<&mut [u8; N]> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut [u8; N]) -> Self::Output { + ZBytes::new(*t) + } +} + +impl From<&mut [u8; N]> for ZBytes { + fn from(t: &mut [u8; N]) -> Self { + ZSerde.serialize(*t) + } +} + +impl<'a, const N: usize> Deserialize<'a, [u8; N]> for ZSerde { + type Input = &'a ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: &ZBytes) -> Result<[u8; N], Self::Error> { + fn deserialize(self, v: Self::Input) -> Result<[u8; N], Self::Error> { use std::io::Read; if v.0.len() != N { @@ -441,6 +525,14 @@ impl TryFrom<&ZBytes> for [u8; N] { } } +impl TryFrom<&mut ZBytes> for [u8; N] { + type Error = ZDeserializeError; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // Vec impl Serialize> for ZSerde { type Output = ZBytes; @@ -470,10 +562,25 @@ impl From<&Vec> for ZBytes { } } -impl Deserialize<'_, Vec> for ZSerde { +impl Serialize<&mut Vec> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut Vec) -> Self::Output { + ZBytes::new(t.clone()) + } +} + +impl From<&mut Vec> for ZBytes { + fn from(t: &mut Vec) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Deserialize<'a, Vec> for ZSerde { + type Input = &'a ZBytes; type Error = Infallible; - fn deserialize(self, v: &ZBytes) -> Result, Self::Error> { + fn deserialize(self, v: Self::Input) -> 
Result, Self::Error> { Ok(v.0.contiguous().to_vec()) } } @@ -490,6 +597,12 @@ impl From<&ZBytes> for Vec { } } +impl From<&mut ZBytes> for Vec { + fn from(value: &mut ZBytes) -> Self { + ZSerde.deserialize(&*value).unwrap_infallible() + } +} + // &[u8] impl Serialize<&[u8]> for ZSerde { type Output = ZBytes; @@ -505,6 +618,20 @@ impl From<&[u8]> for ZBytes { } } +impl Serialize<&mut [u8]> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut [u8]) -> Self::Output { + ZSerde.serialize(&*t) + } +} + +impl From<&mut [u8]> for ZBytes { + fn from(t: &mut [u8]) -> Self { + ZSerde.serialize(t) + } +} + // Cow<[u8]> impl<'a> Serialize> for ZSerde { type Output = ZBytes; @@ -534,10 +661,25 @@ impl From<&Cow<'_, [u8]>> for ZBytes { } } +impl<'a> Serialize<&mut Cow<'a, [u8]>> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut Cow<'a, [u8]>) -> Self::Output { + ZSerde.serialize(&*t) + } +} + +impl From<&mut Cow<'_, [u8]>> for ZBytes { + fn from(t: &mut Cow<'_, [u8]>) -> Self { + ZSerde.serialize(t) + } +} + impl<'a> Deserialize<'a, Cow<'a, [u8]>> for ZSerde { + type Input = &'a ZBytes; type Error = Infallible; - fn deserialize(self, v: &'a ZBytes) -> Result, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result, Self::Error> { Ok(v.0.contiguous()) } } @@ -557,6 +699,12 @@ impl<'a> From<&'a ZBytes> for Cow<'a, [u8]> { } } +impl<'a> From<&'a mut ZBytes> for Cow<'a, [u8]> { + fn from(value: &'a mut ZBytes) -> Self { + ZSerde.deserialize(&*value).unwrap_infallible() + } +} + // String impl Serialize for ZSerde { type Output = ZBytes; @@ -586,10 +734,25 @@ impl From<&String> for ZBytes { } } -impl Deserialize<'_, String> for ZSerde { +impl Serialize<&mut String> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &mut String) -> Self::Output { + ZSerde.serialize(&*s) + } +} + +impl From<&mut String> for ZBytes { + fn from(t: &mut String) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Deserialize<'a, String> for ZSerde { + type Input = &'a ZBytes; type Error = FromUtf8Error; - fn deserialize(self, v: &ZBytes) -> Result { + fn deserialize(self, v: Self::Input) -> Result { let v: Vec = ZSerde.deserialize(v).unwrap_infallible(); String::from_utf8(v) } @@ -611,12 +774,20 @@ impl TryFrom<&ZBytes> for String { } } +impl TryFrom<&mut ZBytes> for String { + type Error = FromUtf8Error; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // &str impl Serialize<&str> for ZSerde { type Output = ZBytes; fn serialize(self, s: &str) -> Self::Output { - Self.serialize(s.to_string()) + ZSerde.serialize(s.to_string()) } } @@ -626,6 +797,20 @@ impl From<&str> for ZBytes { } } +impl Serialize<&mut str> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &mut str) -> Self::Output { + ZSerde.serialize(&*s) + } +} + +impl From<&mut str> for ZBytes { + fn from(t: &mut str) -> Self { + ZSerde.serialize(t) + } +} + impl<'a> Serialize> for ZSerde { type Output = ZBytes; @@ -644,7 +829,7 @@ impl<'a> Serialize<&Cow<'a, str>> for ZSerde { type Output = ZBytes; fn serialize(self, s: &Cow<'a, str>) -> Self::Output { - Self.serialize(s.to_string()) + ZSerde.serialize(s.to_string()) } } @@ -654,10 +839,25 @@ impl From<&Cow<'_, str>> for ZBytes { } } +impl<'a> Serialize<&mut Cow<'a, str>> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &mut Cow<'a, str>) -> Self::Output { + ZSerde.serialize(&*s) + } +} + +impl From<&mut Cow<'_, str>> for ZBytes { + fn from(t: &mut Cow<'_, str>) -> Self { + ZSerde.serialize(t) + } +} + impl<'a> 
Deserialize<'a, Cow<'a, str>> for ZSerde { + type Input = &'a ZBytes; type Error = Utf8Error; - fn deserialize(self, v: &'a ZBytes) -> Result, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result, Self::Error> { Cow::try_from(v) } } @@ -686,6 +886,18 @@ impl<'a> TryFrom<&'a ZBytes> for Cow<'a, str> { } } +impl<'a> TryFrom<&'a mut ZBytes> for Cow<'a, str> { + type Error = Utf8Error; + + fn try_from(v: &'a mut ZBytes) -> Result { + let v: Cow<'a, [u8]> = Cow::from(v); + let _ = core::str::from_utf8(v.as_ref())?; + // SAFETY: &str is &[u8] with the guarantee that every char is UTF-8 + // As implemented internally https://doc.rust-lang.org/std/str/fn.from_utf8_unchecked.html. + Ok(unsafe { core::mem::transmute(v) }) + } +} + // - Integers impl macro_rules! impl_int { ($t:ty) => { @@ -725,10 +937,25 @@ macro_rules! impl_int { } } + impl Serialize<&mut $t> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut $t) -> Self::Output { + Self.serialize(*t) + } + } + + impl From<&mut $t> for ZBytes { + fn from(t: &mut $t) -> Self { + ZSerde.serialize(t) + } + } + impl<'a> Deserialize<'a, $t> for ZSerde { + type Input = &'a ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: &ZBytes) -> Result<$t, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result<$t, Self::Error> { use std::io::Read; let mut r = v.reader(); @@ -758,6 +985,14 @@ macro_rules! impl_int { ZSerde.deserialize(value) } } + + impl TryFrom<&mut ZBytes> for $t { + type Error = ZDeserializeError; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } + } }; } @@ -810,10 +1045,25 @@ impl From<&bool> for ZBytes { } } -impl Deserialize<'_, bool> for ZSerde { +impl Serialize<&mut bool> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut bool) -> Self::Output { + ZSerde.serialize(*t) + } +} + +impl From<&mut bool> for ZBytes { + fn from(t: &mut bool) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Deserialize<'a, bool> for ZSerde { + type Input = &'a ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: &ZBytes) -> Result { + fn deserialize(self, v: Self::Input) -> Result { let p = v.deserialize::().map_err(|_| ZDeserializeError)?; match p { 0 => Ok(false), @@ -839,6 +1089,14 @@ impl TryFrom<&ZBytes> for bool { } } +impl TryFrom<&mut ZBytes> for bool { + type Error = ZDeserializeError; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // - Zenoh advanced types encoders/decoders // Properties impl Serialize> for ZSerde { @@ -869,10 +1127,25 @@ impl<'s> From<&'s Properties<'s>> for ZBytes { } } +impl Serialize<&mut Properties<'_>> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut Properties<'_>) -> Self::Output { + Self.serialize(t.as_str()) + } +} + +impl<'s> From<&'s mut Properties<'s>> for ZBytes { + fn from(t: &'s mut Properties<'s>) -> Self { + ZSerde.serialize(&*t) + } +} + impl<'s> Deserialize<'s, Properties<'s>> for ZSerde { + type Input = &'s ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: &'s ZBytes) -> Result, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result, Self::Error> { let s = v .deserialize::>() .map_err(|_| ZDeserializeError)?; @@ -897,6 +1170,14 @@ impl<'s> TryFrom<&'s ZBytes> for Properties<'s> { } } +impl<'s> TryFrom<&'s mut ZBytes> for Properties<'s> { + type Error = ZDeserializeError; + + fn try_from(value: &'s mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // JSON impl Serialize for ZSerde { type Output = Result; @@ 
-932,10 +1213,29 @@ impl TryFrom<&serde_json::Value> for ZBytes { } } -impl Deserialize<'_, serde_json::Value> for ZSerde { +impl Serialize<&mut serde_json::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &mut serde_json::Value) -> Self::Output { + let mut bytes = ZBytes::empty(); + serde_json::to_writer(bytes.writer(), t)?; + Ok(bytes) + } +} + +impl TryFrom<&mut serde_json::Value> for ZBytes { type Error = serde_json::Error; - fn deserialize(self, v: &ZBytes) -> Result { + fn try_from(value: &mut serde_json::Value) -> Result { + ZSerde.serialize(&*value) + } +} + +impl<'a> Deserialize<'a, serde_json::Value> for ZSerde { + type Input = &'a ZBytes; + type Error = serde_json::Error; + + fn deserialize(self, v: Self::Input) -> Result { serde_json::from_reader(v.reader()) } } @@ -956,6 +1256,14 @@ impl TryFrom<&ZBytes> for serde_json::Value { } } +impl TryFrom<&mut ZBytes> for serde_json::Value { + type Error = serde_json::Error; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // Yaml impl Serialize for ZSerde { type Output = Result; @@ -991,10 +1299,29 @@ impl TryFrom<&serde_yaml::Value> for ZBytes { } } -impl Deserialize<'_, serde_yaml::Value> for ZSerde { +impl Serialize<&mut serde_yaml::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &mut serde_yaml::Value) -> Self::Output { + let mut bytes = ZBytes::empty(); + serde_yaml::to_writer(bytes.writer(), t)?; + Ok(bytes) + } +} + +impl TryFrom<&mut serde_yaml::Value> for ZBytes { + type Error = serde_yaml::Error; + + fn try_from(value: &mut serde_yaml::Value) -> Result { + ZSerde.serialize(value) + } +} + +impl<'a> Deserialize<'a, serde_yaml::Value> for ZSerde { + type Input = &'a ZBytes; type Error = serde_yaml::Error; - fn deserialize(self, v: &ZBytes) -> Result { + fn deserialize(self, v: Self::Input) -> Result { serde_yaml::from_reader(v.reader()) } } @@ -1015,6 +1342,14 @@ impl TryFrom<&ZBytes> for serde_yaml::Value { } } +impl TryFrom<&mut ZBytes> for serde_yaml::Value { + type Error = serde_yaml::Error; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // CBOR impl Serialize for ZSerde { type Output = Result; @@ -1050,10 +1385,27 @@ impl TryFrom<&serde_cbor::Value> for ZBytes { } } -impl Deserialize<'_, serde_cbor::Value> for ZSerde { +impl Serialize<&mut serde_cbor::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &mut serde_cbor::Value) -> Self::Output { + ZSerde.serialize(&*t) + } +} + +impl TryFrom<&mut serde_cbor::Value> for ZBytes { type Error = serde_cbor::Error; - fn deserialize(self, v: &ZBytes) -> Result { + fn try_from(value: &mut serde_cbor::Value) -> Result { + ZSerde.serialize(value) + } +} + +impl<'a> Deserialize<'a, serde_cbor::Value> for ZSerde { + type Input = &'a ZBytes; + type Error = serde_cbor::Error; + + fn deserialize(self, v: Self::Input) -> Result { serde_cbor::from_reader(v.reader()) } } @@ -1074,6 +1426,14 @@ impl TryFrom<&ZBytes> for serde_cbor::Value { } } +impl TryFrom<&mut ZBytes> for serde_cbor::Value { + type Error = serde_cbor::Error; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // Pickle impl Serialize for ZSerde { type Output = Result; @@ -1113,10 +1473,27 @@ impl TryFrom<&serde_pickle::Value> for ZBytes { } } -impl Deserialize<'_, serde_pickle::Value> for ZSerde { +impl Serialize<&mut serde_pickle::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &mut serde_pickle::Value) -> Self::Output { + 
ZSerde.serialize(&*t) + } +} + +impl TryFrom<&mut serde_pickle::Value> for ZBytes { + type Error = serde_pickle::Error; + + fn try_from(value: &mut serde_pickle::Value) -> Result { + ZSerde.serialize(value) + } +} + +impl<'a> Deserialize<'a, serde_pickle::Value> for ZSerde { + type Input = &'a ZBytes; type Error = serde_pickle::Error; - fn deserialize(self, v: &ZBytes) -> Result { + fn deserialize(self, v: Self::Input) -> Result { serde_pickle::value_from_reader(v.reader(), serde_pickle::DeOptions::default()) } } @@ -1137,77 +1514,125 @@ impl TryFrom<&ZBytes> for serde_pickle::Value { } } +impl TryFrom<&mut ZBytes> for serde_pickle::Value { + type Error = serde_pickle::Error; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // Shared memory conversion -#[cfg(feature = "shared-memory")] -impl Serialize> for ZSerde { +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl Serialize for ZSerde { type Output = ZBytes; - fn serialize(self, t: Arc) -> Self::Output { - ZBytes::new(t) + fn serialize(self, t: ZSliceShm) -> Self::Output { + let slice: ZSlice = t.into(); + ZBytes::new(slice) } } -#[cfg(feature = "shared-memory")] -impl From> for ZBytes { - fn from(t: Arc) -> Self { + +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl From for ZBytes { + fn from(t: ZSliceShm) -> Self { ZSerde.serialize(t) } } -#[cfg(feature = "shared-memory")] -impl Serialize> for ZSerde { +// Shared memory conversion +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl Serialize for ZSerde { type Output = ZBytes; - fn serialize(self, t: Box) -> Self::Output { - let smb: Arc = t.into(); - Self.serialize(smb) + fn serialize(self, t: ZSliceShmMut) -> Self::Output { + let slice: ZSlice = t.into(); + ZBytes::new(slice) } } -#[cfg(feature = "shared-memory")] -impl From> for ZBytes { - fn from(t: Box) -> Self { +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl From for ZBytes { + fn from(t: ZSliceShmMut) -> Self { ZSerde.serialize(t) } } -#[cfg(feature = "shared-memory")] -impl Serialize for ZSerde { - type Output = ZBytes; +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl<'a> Deserialize<'a, &'a zsliceshm> for ZSerde { + type Input = &'a ZBytes; + type Error = ZDeserializeError; - fn serialize(self, t: SharedMemoryBuf) -> Self::Output { - ZBytes::new(t) + fn deserialize(self, v: Self::Input) -> Result<&'a zsliceshm, Self::Error> { + // A ZSliceShm is expected to have only one slice + let mut zslices = v.0.zslices(); + if let Some(zs) = zslices.next() { + if let Some(shmb) = zs.downcast_ref::() { + return Ok(shmb.into()); + } + } + Err(ZDeserializeError) } } -#[cfg(feature = "shared-memory")] -impl From for ZBytes { - fn from(t: SharedMemoryBuf) -> Self { - ZSerde.serialize(t) +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl<'a> TryFrom<&'a ZBytes> for &'a zsliceshm { + type Error = ZDeserializeError; + + fn try_from(value: &'a ZBytes) -> Result { + ZSerde.deserialize(value) } } -#[cfg(feature = "shared-memory")] -impl Deserialize<'_, SharedMemoryBuf> for ZSerde { +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zsliceshm { type Error = ZDeserializeError; - fn deserialize(self, v: &ZBytes) -> Result { - // A SharedMemoryBuf is expected to have only one slice - let mut zslices = v.0.zslices(); + fn try_from(value: &'a mut ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +#[cfg(all(feature = "shared-memory", feature = 
"unstable"))] +impl<'a> Deserialize<'a, &'a mut zsliceshm> for ZSerde { + type Input = &'a mut ZBytes; + type Error = ZDeserializeError; + + fn deserialize(self, v: Self::Input) -> Result<&'a mut zsliceshm, Self::Error> { + // A ZSliceShmBorrowMut is expected to have only one slice + let mut zslices = v.0.zslices_mut(); if let Some(zs) = zslices.next() { - if let Some(shmb) = zs.downcast_ref::() { - return Ok(shmb.clone()); + if let Some(shmb) = zs.downcast_mut::() { + return Ok(shmb.into()); } } Err(ZDeserializeError) } } -#[cfg(feature = "shared-memory")] -impl TryFrom for SharedMemoryBuf { +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl<'a> Deserialize<'a, &'a mut zsliceshmmut> for ZSerde { + type Input = &'a mut ZBytes; type Error = ZDeserializeError; - fn try_from(value: ZBytes) -> Result { - ZSerde.deserialize(&value) + fn deserialize(self, v: Self::Input) -> Result<&'a mut zsliceshmmut, Self::Error> { + // A ZSliceShmBorrowMut is expected to have only one slice + let mut zslices = v.0.zslices_mut(); + if let Some(zs) = zslices.next() { + if let Some(shmb) = zs.downcast_mut::() { + return shmb.try_into().map_err(|_| ZDeserializeError); + } + } + Err(ZDeserializeError) + } +} + +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zsliceshmmut { + type Error = ZDeserializeError; + + fn try_from(value: &'a mut ZBytes) -> Result { + ZSerde.deserialize(value) } } @@ -1267,16 +1692,17 @@ where } } -impl Deserialize<'_, (A, B)> for ZSerde +impl<'s, A, B> Deserialize<'s, (A, B)> for ZSerde where - for<'a> A: TryFrom<&'a ZBytes>, - for<'a> >::Error: Debug, - for<'b> B: TryFrom<&'b ZBytes>, - for<'b> >::Error: Debug, + A: TryFrom + 'static, + >::Error: Debug + 'static, + B: TryFrom + 'static, + >::Error: Debug + 'static, { + type Input = &'s ZBytes; type Error = ZError; - fn deserialize(self, bytes: &ZBytes) -> Result<(A, B), Self::Error> { + fn deserialize(self, bytes: Self::Input) -> Result<(A, B), Self::Error> { let codec = Zenoh080::new(); let mut reader = bytes.0.reader(); @@ -1286,18 +1712,18 @@ where let bbuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; let bpld = ZBytes::new(bbuf); - let a = A::try_from(&apld).map_err(|e| zerror!("{:?}", e))?; - let b = B::try_from(&bpld).map_err(|e| zerror!("{:?}", e))?; + let a = A::try_from(apld).map_err(|e| zerror!("{:?}", e))?; + let b = B::try_from(bpld).map_err(|e| zerror!("{:?}", e))?; Ok((a, b)) } } impl TryFrom for (A, B) where - A: for<'a> TryFrom<&'a ZBytes>, - for<'a> >::Error: Debug, - for<'b> B: TryFrom<&'b ZBytes>, - for<'b> >::Error: Debug, + A: TryFrom + 'static, + >::Error: Debug + 'static, + B: TryFrom + 'static, + >::Error: Debug + 'static, { type Error = ZError; @@ -1308,10 +1734,10 @@ where impl TryFrom<&ZBytes> for (A, B) where - for<'a> A: TryFrom<&'a ZBytes>, - for<'a> >::Error: Debug, - for<'b> B: TryFrom<&'b ZBytes>, - for<'b> >::Error: Debug, + A: TryFrom + 'static, + >::Error: Debug + 'static, + B: TryFrom + 'static, + >::Error: Debug + 'static, { type Error = ZError; @@ -1320,6 +1746,20 @@ where } } +impl TryFrom<&mut ZBytes> for (A, B) +where + A: TryFrom + 'static, + >::Error: Debug + 'static, + B: TryFrom + 'static, + >::Error: Debug + 'static, +{ + type Error = ZError; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + // For convenience to always convert a Value in the examples #[derive(Debug, Clone, PartialEq, Eq)] pub enum StringOrBase64 { @@ -1361,6 +1801,13 @@ impl From<&ZBytes> for 
StringOrBase64 { } } +impl From<&mut ZBytes> for StringOrBase64 { + fn from(v: &mut ZBytes) -> Self { + StringOrBase64::from(&*v) + } +} + +// Protocol attachment extension impl From for AttachmentType { fn from(this: ZBytes) -> Self { AttachmentType { @@ -1384,6 +1831,16 @@ mod tests { use zenoh_buffers::{ZBuf, ZSlice}; use zenoh_protocol::core::Properties; + #[cfg(all(feature = "shared-memory", feature = "unstable"))] + use zenoh_shm::api::{ + protocol_implementations::posix::{ + posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, + protocol_id::POSIX_PROTOCOL_ID, + }, + provider::shared_memory_provider::SharedMemoryProviderBuilder, + slice::zsliceshm::{zsliceshm, ZSliceShm}, + }; + const NUM: usize = 1_000; macro_rules! serialize_deserialize { @@ -1399,81 +1856,118 @@ mod tests { }; } - let mut rng = rand::thread_rng(); - - // unsigned integer - serialize_deserialize!(u8, u8::MIN); - serialize_deserialize!(u16, u16::MIN); - serialize_deserialize!(u32, u32::MIN); - serialize_deserialize!(u64, u64::MIN); - serialize_deserialize!(usize, usize::MIN); - - serialize_deserialize!(u8, u8::MAX); - serialize_deserialize!(u16, u16::MAX); - serialize_deserialize!(u32, u32::MAX); - serialize_deserialize!(u64, u64::MAX); - serialize_deserialize!(usize, usize::MAX); - - for _ in 0..NUM { - serialize_deserialize!(u8, rng.gen::()); - serialize_deserialize!(u16, rng.gen::()); - serialize_deserialize!(u32, rng.gen::()); - serialize_deserialize!(u64, rng.gen::()); - serialize_deserialize!(usize, rng.gen::()); - } + // WARN: test function body produces stack overflow, so I split it into subroutines + #[inline(never)] + fn numeric() { + let mut rng = rand::thread_rng(); + + // unsigned integer + serialize_deserialize!(u8, u8::MIN); + serialize_deserialize!(u16, u16::MIN); + serialize_deserialize!(u32, u32::MIN); + serialize_deserialize!(u64, u64::MIN); + serialize_deserialize!(usize, usize::MIN); + + serialize_deserialize!(u8, u8::MAX); + serialize_deserialize!(u16, u16::MAX); + serialize_deserialize!(u32, u32::MAX); + serialize_deserialize!(u64, u64::MAX); + serialize_deserialize!(usize, usize::MAX); + + for _ in 0..NUM { + serialize_deserialize!(u8, rng.gen::()); + serialize_deserialize!(u16, rng.gen::()); + serialize_deserialize!(u32, rng.gen::()); + serialize_deserialize!(u64, rng.gen::()); + serialize_deserialize!(usize, rng.gen::()); + } - // signed integer - serialize_deserialize!(i8, i8::MIN); - serialize_deserialize!(i16, i16::MIN); - serialize_deserialize!(i32, i32::MIN); - serialize_deserialize!(i64, i64::MIN); - serialize_deserialize!(isize, isize::MIN); - - serialize_deserialize!(i8, i8::MAX); - serialize_deserialize!(i16, i16::MAX); - serialize_deserialize!(i32, i32::MAX); - serialize_deserialize!(i64, i64::MAX); - serialize_deserialize!(isize, isize::MAX); - - for _ in 0..NUM { - serialize_deserialize!(i8, rng.gen::()); - serialize_deserialize!(i16, rng.gen::()); - serialize_deserialize!(i32, rng.gen::()); - serialize_deserialize!(i64, rng.gen::()); - serialize_deserialize!(isize, rng.gen::()); - } + // signed integer + serialize_deserialize!(i8, i8::MIN); + serialize_deserialize!(i16, i16::MIN); + serialize_deserialize!(i32, i32::MIN); + serialize_deserialize!(i64, i64::MIN); + serialize_deserialize!(isize, isize::MIN); + + serialize_deserialize!(i8, i8::MAX); + serialize_deserialize!(i16, i16::MAX); + serialize_deserialize!(i32, i32::MAX); + serialize_deserialize!(i64, i64::MAX); + serialize_deserialize!(isize, isize::MAX); + + for _ in 0..NUM { + 
serialize_deserialize!(i8, rng.gen::()); + serialize_deserialize!(i16, rng.gen::()); + serialize_deserialize!(i32, rng.gen::()); + serialize_deserialize!(i64, rng.gen::()); + serialize_deserialize!(isize, rng.gen::()); + } - // float - serialize_deserialize!(f32, f32::MIN); - serialize_deserialize!(f64, f64::MIN); + // float + serialize_deserialize!(f32, f32::MIN); + serialize_deserialize!(f64, f64::MIN); - serialize_deserialize!(f32, f32::MAX); - serialize_deserialize!(f64, f64::MAX); + serialize_deserialize!(f32, f32::MAX); + serialize_deserialize!(f64, f64::MAX); - for _ in 0..NUM { - serialize_deserialize!(f32, rng.gen::()); - serialize_deserialize!(f64, rng.gen::()); + for _ in 0..NUM { + serialize_deserialize!(f32, rng.gen::()); + serialize_deserialize!(f64, rng.gen::()); + } + } + numeric(); + + // WARN: test function body produces stack overflow, so I split it into subroutines + #[inline(never)] + fn basic() { + // String + serialize_deserialize!(String, ""); + serialize_deserialize!(String, String::from("abcdef")); + + // Cow + serialize_deserialize!(Cow, Cow::from("")); + serialize_deserialize!(Cow, Cow::from(String::from("abcdef"))); + + // Vec + serialize_deserialize!(Vec, vec![0u8; 0]); + serialize_deserialize!(Vec, vec![0u8; 64]); + + // Cow<[u8]> + serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 0])); + serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 64])); + + // ZBuf + serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 0])); + serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 64])); + } + basic(); + + // SHM + #[cfg(all(feature = "shared-memory", feature = "unstable"))] + { + // create an SHM backend... + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(4096) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + // Prepare a layout for allocations + let layout = provider.alloc_layout().size(1024).res().unwrap(); + + // allocate an SHM buffer + let mutable_shm_buf = layout.alloc().res().unwrap(); + + // convert to immutable SHM buffer + let immutable_shm_buf: ZSliceShm = mutable_shm_buf.into(); + + serialize_deserialize!(&zsliceshm, immutable_shm_buf); } - - // String - serialize_deserialize!(String, ""); - serialize_deserialize!(String, String::from("abcdef")); - - // Cow - serialize_deserialize!(Cow, Cow::from("")); - serialize_deserialize!(Cow, Cow::from(String::from("abcdef"))); - - // Vec - serialize_deserialize!(Vec, vec![0u8; 0]); - serialize_deserialize!(Vec, vec![0u8; 64]); - - // Cow<[u8]> - serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 0])); - serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 64])); - - // ZBuf - serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 0])); - serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 64])); // Properties serialize_deserialize!(Properties, Properties::from("")); @@ -1483,6 +1977,14 @@ mod tests { serialize_deserialize!((usize, usize), (0, 1)); serialize_deserialize!((usize, String), (0, String::from("a"))); serialize_deserialize!((String, String), (String::from("a"), String::from("b"))); + serialize_deserialize!( + (Cow<'static, [u8]>, Cow<'static, [u8]>), + (Cow::from(vec![0u8; 8]), Cow::from(vec![0u8; 8])) + ); + serialize_deserialize!( + (Cow<'static, str>, Cow<'static, str>), + (Cow::from("a"), Cow::from("b")) + ); // Iterator let v: [usize; 5] = [0, 1, 2, 3, 4]; @@ -1567,5 +2069,14 @@ mod tests { println!("Deserialize:\t{:?}\n", p); let o = 
HashMap::from_iter(p.iter::<(String, String)>()); assert_eq!(hm, o); + + let mut hm: HashMap, Cow<'static, str>> = HashMap::new(); + hm.insert(Cow::from("0"), Cow::from("a")); + hm.insert(Cow::from("1"), Cow::from("b")); + println!("Serialize:\t{:?}", hm); + let p = ZBytes::from_iter(hm.iter()); + println!("Deserialize:\t{:?}\n", p); + let o = HashMap::from_iter(p.iter::<(Cow<'static, str>, Cow<'static, str>)>()); + assert_eq!(hm, o); } } From 19be2468d15a5cca59d1b88512e00c0753beea05 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Thu, 25 Apr 2024 12:42:42 +0200 Subject: [PATCH 288/357] shm examples fixed --- examples/examples/z_alloc_shm.rs | 10 +--------- examples/examples/z_ping_shm.rs | 10 ---------- examples/examples/z_pub_shm.rs | 1 - examples/examples/z_sub_shm.rs | 1 - zenoh/src/lib.rs | 3 +++ 5 files changed, 4 insertions(+), 21 deletions(-) diff --git a/examples/examples/z_alloc_shm.rs b/examples/examples/z_alloc_shm.rs index a6afb1190c..34e1c07058 100644 --- a/examples/examples/z_alloc_shm.rs +++ b/examples/examples/z_alloc_shm.rs @@ -12,14 +12,6 @@ // ZettaScale Zenoh Team, // use zenoh::prelude::r#async::*; -use zenoh::shm::protocol_implementations::posix::posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend; -use zenoh::shm::protocol_implementations::posix::protocol_id::POSIX_PROTOCOL_ID; -use zenoh::shm::provider::shared_memory_provider::{ - BlockOn, GarbageCollect, SharedMemoryProviderBuilder, -}; -use zenoh::shm::provider::shared_memory_provider::{Deallocate, Defragment}; -use zenoh::shm::provider::types::{AllocAlignment, MemoryLayout}; -use zenoh::Result; #[tokio::main] async fn main() { @@ -28,7 +20,7 @@ async fn main() { run().await.unwrap() } -async fn run() -> Result<()> { +async fn run() -> ZResult<()> { // Construct an SHM backend let backend = { // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. 
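For context, the allocation flow that z_alloc_shm.rs relies on after this change looks roughly as follows. This is a minimal sketch, not part of the patch: it reuses the builder calls exercised in the ZBytes serializer test earlier in this series, and the generic argument to `protocol_id` as well as the exact `zenoh_shm::api` import paths are assumptions filled in here, since the patch text elides type parameters.

// Minimal sketch (assumes the "shared-memory" and "unstable" features are enabled);
// paths and generics are taken from the test code earlier in this series where possible.
use zenoh_shm::api::{
    protocol_implementations::posix::{
        posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend,
        protocol_id::POSIX_PROTOCOL_ID,
    },
    provider::shared_memory_provider::SharedMemoryProviderBuilder,
    slice::zsliceshm::ZSliceShm,
};

fn shm_alloc_sketch() {
    // Create a 4096-byte POSIX SHM backend...
    let backend = PosixSharedMemoryProviderBackend::builder()
        .with_size(4096)
        .unwrap()
        .res()
        .unwrap();
    // ...and wrap it into an SHM provider bound to the POSIX protocol id.
    let provider = SharedMemoryProviderBuilder::builder()
        .protocol_id::<POSIX_PROTOCOL_ID>()
        .backend(backend)
        .res();
    // Prepare a reusable 1024-byte allocation layout and allocate a buffer from it.
    let layout = provider.alloc_layout().size(1024).res().unwrap();
    let mutable_buf = layout.alloc().res().unwrap();
    // Freeze the mutable buffer into an immutable ZSliceShm, ready to be used as a payload.
    let _immutable_buf: ZSliceShm = mutable_buf.into();
}

Note that the imports removed from z_alloc_shm.rs above (BlockOn, GarbageCollect, Deallocate, Defragment, AllocAlignment, MemoryLayout) correspond to the re-exports added to the shm module in the zenoh/src/lib.rs hunk later in this patch.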
diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs index 08c08276d4..98d9bae825 100644 --- a/examples/examples/z_ping_shm.rs +++ b/examples/examples/z_ping_shm.rs @@ -13,17 +13,7 @@ // use clap::Parser; use std::time::{Duration, Instant}; -use zenoh::buffers::ZSlice; -use zenoh::config::Config; use zenoh::prelude::sync::*; -use zenoh::publication::CongestionControl; -use zenoh::shm::protocol_implementations::posix::{ - posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, - protocol_id::POSIX_PROTOCOL_ID, -}; -use zenoh::shm::provider::shared_memory_provider::SharedMemoryProviderBuilder; -use zenoh::shm::provider::types::AllocAlignment; -use zenoh::shm::provider::types::MemoryLayout; use zenoh_examples::CommonArgs; fn main() { diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index cdabee5ff3..79527c3e5f 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use std::time::Duration; use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index aa3967becd..282fd8c776 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -14,7 +14,6 @@ use clap::Parser; use zenoh::config::Config; use zenoh::prelude::r#async::*; -use zenoh::shm::slice::zsliceshm::zsliceshm; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 60dab218db..fce115cfb1 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -365,6 +365,9 @@ pub mod internal { pub mod shm { pub use zenoh_shm::api::client_storage::SharedMemoryClientStorage; pub use zenoh_shm::api::provider::shared_memory_provider::{BlockOn, GarbageCollect}; + pub use zenoh_shm::api::provider::shared_memory_provider::{Deallocate, Defragment}; + pub use zenoh_shm::api::provider::types::AllocAlignment; + pub use zenoh_shm::api::provider::types::MemoryLayout; pub use zenoh_shm::api::slice::zsliceshm::{zsliceshm, ZSliceShm}; pub use zenoh_shm::api::slice::zsliceshmmut::{zsliceshmmut, ZSliceShmMut}; pub use zenoh_shm::api::{ From afacf77d052e0a957ffaeb94422d2eb743a53e0f Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Thu, 25 Apr 2024 16:22:53 +0200 Subject: [PATCH 289/357] refactor: remove `zenoh::query::Mode` because unused (#977) It seems that `Mode` has been integrated into ConsolidationMode, replacing `Mode`. --- zenoh/src/query.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index db7071c278..16cd7fdec5 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -31,13 +31,6 @@ pub type QueryTarget = zenoh_protocol::network::request::ext::TargetType; /// The kind of consolidation. pub type ConsolidationMode = zenoh_protocol::zenoh::query::Consolidation; -/// The operation: either manual or automatic. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum Mode { - Auto, - Manual(T), -} - /// The replies consolidation strategy to apply on replies to a [`get`](Session::get). 
#[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct QueryConsolidation { From 193e2230680c7638718889094fddf4a9d6a8859a Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 25 Apr 2024 18:09:26 +0200 Subject: [PATCH 290/357] Remove keyexpr with_parameters (#979) * Remove keyexpr with_parameters * Fix nextest.toml * Update plugins/zenoh-plugin-storage-manager/src/replica/storage.rs Co-authored-by: Joseph Perez --------- Co-authored-by: Joseph Perez --- .config/nextest.toml | 2 +- .../src/replica/aligner.rs | 8 ++++---- .../src/replica/storage.rs | 2 +- zenoh/src/key_expr.rs | 16 +--------------- zenoh/src/selector.rs | 7 +++++-- 5 files changed, 12 insertions(+), 23 deletions(-) diff --git a/.config/nextest.toml b/.config/nextest.toml index b2ed4cde98..4999dce0d3 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -16,7 +16,7 @@ test(=three_node_combination) | test(=watchdog_alloc_concurrent) | test(=header_check_memory_concurrent) | test(=header_link_concurrent) | -test(=header_link_failure_concurrent) +test(=header_link_failure_concurrent) | test(=downsampling_by_keyexpr) """ threads-required = 'num-cpus' diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 3392bf28e8..75368783b5 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -316,10 +316,10 @@ impl Aligner { async fn perform_query(&self, from: &str, properties: String) -> (Vec, bool) { let mut no_err = true; - let selector = KeyExpr::from(&self.digest_key) - .join(&from) - .unwrap() - .with_parameters(&properties); + let selector = Selector::new( + KeyExpr::from(&self.digest_key).join(&from).unwrap(), + properties, + ); tracing::trace!("[ALIGNER] Sending Query '{}'...", selector); let mut return_val = Vec::new(); match self diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 14425f4c28..0dc8bcb79d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -638,7 +638,7 @@ impl StorageService { // with `_time=[..]` to get historical data (in case of time-series) let replies = match self .session - .get(KeyExpr::from(&self.key_expr).with_parameters("_time=[..]")) + .get(Selector::new(&self.key_expr, "_time=[..]")) .target(QueryTarget::All) .consolidation(ConsolidationMode::None) .res() diff --git a/zenoh/src/key_expr.rs b/zenoh/src/key_expr.rs index c1c0504208..419918d547 100644 --- a/zenoh/src/key_expr.rs +++ b/zenoh/src/key_expr.rs @@ -57,7 +57,7 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; -use crate::{net::primitives::Primitives, prelude::Selector, Session, Undeclarable}; +use crate::{net::primitives::Primitives, Session, Undeclarable}; #[derive(Clone, Debug)] pub(crate) enum KeyExprInner<'a> { @@ -301,20 +301,6 @@ impl<'a> KeyExpr<'a> { Ok(r.into()) } } - - pub fn with_parameters(self, selector: &'a str) -> Selector<'a> { - Selector { - key_expr: self, - parameters: selector.into(), - } - } - - pub fn with_owned_parameters(self, selector: String) -> Selector<'a> { - Selector { - key_expr: self, - parameters: selector.into(), - } - } } impl FromStr for KeyExpr<'static> { diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs index 2c7fc2d782..343f5fda1d 100644 --- a/zenoh/src/selector.rs +++ b/zenoh/src/selector.rs @@ -239,7 +239,7 @@ impl TryFrom for 
Selector<'_> { Some(qmark_position) => { let parameters = s[qmark_position + 1..].to_owned(); s.truncate(qmark_position); - Ok(KeyExpr::try_from(s)?.with_owned_parameters(parameters)) + Ok(Selector::new(KeyExpr::try_from(s)?, parameters)) } None => Ok(KeyExpr::try_from(s)?.into()), } @@ -252,7 +252,10 @@ impl<'a> TryFrom<&'a str> for Selector<'a> { match s.find('?') { Some(qmark_position) => { let params = &s[qmark_position + 1..]; - Ok(KeyExpr::try_from(&s[..qmark_position])?.with_parameters(params)) + Ok(Selector::new( + KeyExpr::try_from(&s[..qmark_position])?, + params, + )) } None => Ok(KeyExpr::try_from(s)?.into()), } From 88af83f596f19a3c947e06478ab0c7c6717dfa65 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 26 Apr 2024 01:54:55 +0200 Subject: [PATCH 291/357] cargo fmt --- zenoh/src/api/key_expr.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index 260399922f..774cf28790 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -12,10 +12,7 @@ // ZettaScale Zenoh Team, // -use super::{ - selector::Selector, - session::{Session, Undeclarable}, -}; +use super::session::{Session, Undeclarable}; use crate::net::primitives::Primitives; use std::{ convert::{TryFrom, TryInto}, From 8e636ed8cb6fcd9841d6078ebabecfa533ca6769 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 26 Apr 2024 02:03:19 +0200 Subject: [PATCH 292/357] build fixes --- zenoh/src/api/builders/publication.rs | 2 -- zenoh/src/api/session.rs | 6 ++---- zenoh/src/lib.rs | 1 - 3 files changed, 2 insertions(+), 7 deletions(-) diff --git a/zenoh/src/api/builders/publication.rs b/zenoh/src/api/builders/publication.rs index ef2224193f..711cb063f6 100644 --- a/zenoh/src/api/builders/publication.rs +++ b/zenoh/src/api/builders/publication.rs @@ -20,7 +20,6 @@ use crate::api::bytes::OptionZBytes; use crate::api::bytes::ZBytes; use crate::api::key_expr::KeyExpr; use crate::api::publication::Priority; -#[cfg(feature = "unstable")] use crate::api::sample::Locality; use crate::api::sample::SampleKind; #[cfg(feature = "unstable")] @@ -255,7 +254,6 @@ pub struct PublisherBuilder<'a, 'b: 'a> { pub(crate) congestion_control: CongestionControl, pub(crate) priority: Priority, pub(crate) is_express: bool, - #[cfg(feature = "unstable")] pub(crate) destination: Locality, } diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index c481b01bdf..4fc0df5c1a 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -22,10 +22,7 @@ use super::{ info::SessionInfo, key_expr::{KeyExpr, KeyExprInner}, publication::Priority, - query::{ - ConsolidationMode, GetBuilder, QueryConsolidation, QueryState, QueryTarget, Reply, - _REPLY_KEY_EXPR_ANY_SEL_PARAM, - }, + query::{ConsolidationMode, GetBuilder, QueryConsolidation, QueryState, QueryTarget, Reply}, queryable::{Query, QueryInner, QueryableBuilder, QueryableState}, sample::{DataInfo, DataInfoIntoSample, Locality, QoS, Sample, SampleKind}, selector::{Selector, TIME_RANGE_KEY}, @@ -87,6 +84,7 @@ use super::{ liveliness::{Liveliness, LivelinessTokenState}, publication::Publisher, publication::{MatchingListenerState, MatchingStatus}, + query::_REPLY_KEY_EXPR_ANY_SEL_PARAM, sample::SourceInfo, }; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index fce115cfb1..3c011e2439 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -271,7 +271,6 @@ pub mod publication { /// Query primitives pub mod query { - pub use crate::api::query::Mode; pub use crate::api::query::Reply; 
#[zenoh_macros::unstable] pub use crate::api::query::ReplyKeyExpr; From fc9e2d3cbbc2dab248cdf72f7f419a062e54f4f1 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 26 Apr 2024 11:26:50 +0200 Subject: [PATCH 293/357] Fix clippy warnings --- zenoh/src/net/runtime/adminspace.rs | 22 ++++------------------ 1 file changed, 4 insertions(+), 18 deletions(-) diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 2ce736c1fa..c724ede9bf 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -714,15 +714,8 @@ fn subscribers_data(context: &AdminContext, query: Query) { )) .unwrap(); if query.key_expr().intersects(&key) { - let payload = match ZBytes::try_from( - serde_json::to_string(&sub.1).unwrap_or_else(|_| "{}".to_string()), - ) { - Ok(p) => p, - Err(e) => { - tracing::error!("Error serializing AdminSpace reply: {:?}", e); - return; - } - }; + let payload = + ZBytes::from(serde_json::to_string(&sub.1).unwrap_or_else(|_| "{}".to_string())); if let Err(e) = query .reply(key, payload) .encoding(Encoding::APPLICATION_JSON) @@ -745,15 +738,8 @@ fn queryables_data(context: &AdminContext, query: Query) { )) .unwrap(); if query.key_expr().intersects(&key) { - let payload = match ZBytes::try_from( - serde_json::to_string(&qabl.1).unwrap_or_else(|_| "{}".to_string()), - ) { - Ok(p) => p, - Err(e) => { - tracing::error!("Error serializing AdminSpace reply: {:?}", e); - return; - } - }; + let payload = + ZBytes::from(serde_json::to_string(&qabl.1).unwrap_or_else(|_| "{}".to_string())); if let Err(e) = query .reply(key, payload) .encoding(Encoding::APPLICATION_JSON) From 42bd3e4e5305d5b9c35ad01a9e1b890b9c950183 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 26 Apr 2024 11:56:18 +0200 Subject: [PATCH 294/357] Fix valgrind check --- ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs index bc8716bb45..364617eb2a 100644 --- a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -20,15 +20,13 @@ use zenoh::prelude::r#async::*; async fn main() { zenoh_util::init_log_test(); - let _z = zenoh_runtime::ZRuntimePoolGuard; - let queryable_key_expr = KeyExpr::try_from("test/valgrind/data").unwrap(); let get_selector = Selector::try_from("test/valgrind/**").unwrap(); println!("Declaring Queryable on '{queryable_key_expr}'..."); let queryable_session = zenoh::open(Config::default()).res().await.unwrap(); let _queryable = queryable_session - .declare_queryable(queryable_key_expr) + .declare_queryable(queryable_key_expr.clone()) .callback(move |query| { println!(">> Handling query '{}'", query.selector()); let queryable_key_expr = queryable_key_expr.clone(); From 9284388c466b3ad5822e826bd612ccb847bd5eba Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 26 Apr 2024 12:00:32 +0200 Subject: [PATCH 295/357] Add payload_mut to sample for zsliceshmmut deserialization --- examples/examples/z_sub_shm.rs | 20 ++++++++++++++++++++ zenoh/src/sample/mod.rs | 6 ++++++ 2 files changed, 26 insertions(+) diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index aa3967becd..35fb80d833 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -49,6 +49,26 @@ async fn main() { } } } + + // // Try to get a mutable reference to the 
SHM buffer. If this subscriber is the only subscriber + // // holding a reference to the SHM buffer, then it will be able to get a mutable reference to it. + // // With the mutable reference at hand, it's possible to mutate in place the SHM buffer content. + // + // use zenoh::shm::slice::zsliceshmmut::zsliceshmmut; + + // while let Ok(mut sample) = subscriber.recv_async().await { + // let kind = sample.kind(); + // let key_expr = sample.key_expr().to_string(); + // match sample.payload_mut().deserialize_mut::<&mut zsliceshmmut>() { + // Ok(payload) => println!( + // ">> [Subscriber] Received {} ('{}': '{:02x?}')", + // kind, key_expr, payload + // ), + // Err(e) => { + // println!(">> [Subscriber] Not a SharedMemoryBuf: {:?}", e); + // } + // } + // } } #[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/sample/mod.rs index b5dbd727ec..0c1180fb8f 100644 --- a/zenoh/src/sample/mod.rs +++ b/zenoh/src/sample/mod.rs @@ -308,6 +308,12 @@ impl Sample { &self.payload } + /// Gets the payload of this Sample. + #[inline] + pub fn payload_mut(&mut self) -> &mut ZBytes { + &mut self.payload + } + /// Gets the kind of this Sample. #[inline] pub fn kind(&self) -> SampleKind { From c4f7a49435c022bef1699af6a2d63dd7dd70b3c6 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 26 Apr 2024 12:15:00 +0200 Subject: [PATCH 296/357] clippy fix --- plugins/zenoh-plugin-storage-manager/src/replica/storage.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 5f72192663..ba078c0012 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -34,6 +34,7 @@ use zenoh::key_expr::OwnedKeyExpr; use zenoh::query::{ConsolidationMode, QueryTarget}; use zenoh::sample::{Sample, SampleKind, TimestampBuilderTrait}; use zenoh::sample::{SampleBuilder, ValueBuilderTrait}; +use zenoh::selector::Selector; use zenoh::session::SessionDeclarations; use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; use zenoh::value::Value; From 7f2e7a5a4339285822553d8074dcc1964dc28665 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 26 Apr 2024 12:18:12 +0200 Subject: [PATCH 297/357] missing merged file added --- commons/zenoh-config/src/lib.rs | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 26f7cfefaa..1029446557 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -35,7 +35,7 @@ use validated_struct::ValidatedMapAssociatedTypes; pub use validated_struct::{GetError, ValidatedMap}; use zenoh_core::zlock; pub use zenoh_protocol::core::{ - whatami, EndPoint, Locator, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, ZenohId, + whatami, EndPoint, Locator, Priority, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, ZenohId, }; use zenoh_protocol::{ core::{key_expr::OwnedKeyExpr, Bits}, @@ -482,9 +482,6 @@ validated_struct::validator! { /// To use it, you must enable zenoh's unstable feature flag. ///

AdminSpaceConf { - /// Enable the admin space - #[serde(default = "set_false")] - pub enabled: bool, /// Permissions on the admin space pub permissions: PermissionsConf { @@ -510,11 +507,7 @@ validated_struct::validator! { /// A list of directories where plugins may be searched for if no `__path__` was specified for them. /// The executable's current directory will be added to the search paths. - pub plugins_loading: #[derive(Default)] - PluginsLoading { - pub enabled: bool, - pub search_dirs: Option>, // TODO (low-prio): Switch this String to a PathBuf? (applies to other paths in the config as well) - }, + plugins_search_dirs: Vec, // TODO (low-prio): Switch this String to a PathBuf? (applies to other paths in the config as well) #[validated(recursive_accessors)] /// The configuration for plugins. /// @@ -728,13 +721,10 @@ impl Config { } pub fn libloader(&self) -> LibLoader { - if self.plugins_loading.enabled { - match self.plugins_loading.search_dirs() { - Some(dirs) => LibLoader::new(dirs, true), - None => LibLoader::default(), - } + if self.plugins_search_dirs.is_empty() { + LibLoader::default() } else { - LibLoader::empty() + LibLoader::new(&self.plugins_search_dirs, true) } } } From 509c7279ba6d9ea54150162225255884707e38a1 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 26 Apr 2024 12:23:24 +0200 Subject: [PATCH 298/357] priority removed --- commons/zenoh-config/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 1029446557..e7d73248db 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -35,7 +35,7 @@ use validated_struct::ValidatedMapAssociatedTypes; pub use validated_struct::{GetError, ValidatedMap}; use zenoh_core::zlock; pub use zenoh_protocol::core::{ - whatami, EndPoint, Locator, Priority, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, ZenohId, + whatami, EndPoint, Locator, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, ZenohId, }; use zenoh_protocol::{ core::{key_expr::OwnedKeyExpr, Bits}, From 983bd89191302ba31f6129c127d59b1733a05859 Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 26 Apr 2024 12:28:25 +0200 Subject: [PATCH 299/357] changes in config restored --- commons/zenoh-config/src/lib.rs | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index e7d73248db..26f7cfefaa 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -482,6 +482,9 @@ validated_struct::validator! { /// To use it, you must enable zenoh's unstable feature flag. /// AdminSpaceConf { + /// Enable the admin space + #[serde(default = "set_false")] + pub enabled: bool, /// Permissions on the admin space pub permissions: PermissionsConf { @@ -507,7 +510,11 @@ validated_struct::validator! { /// A list of directories where plugins may be searched for if no `__path__` was specified for them. /// The executable's current directory will be added to the search paths. - plugins_search_dirs: Vec, // TODO (low-prio): Switch this String to a PathBuf? (applies to other paths in the config as well) + pub plugins_loading: #[derive(Default)] + PluginsLoading { + pub enabled: bool, + pub search_dirs: Option>, // TODO (low-prio): Switch this String to a PathBuf? (applies to other paths in the config as well) + }, #[validated(recursive_accessors)] /// The configuration for plugins. 
/// @@ -721,10 +728,13 @@ impl Config { } pub fn libloader(&self) -> LibLoader { - if self.plugins_search_dirs.is_empty() { - LibLoader::default() + if self.plugins_loading.enabled { + match self.plugins_loading.search_dirs() { + Some(dirs) => LibLoader::new(dirs, true), + None => LibLoader::default(), + } } else { - LibLoader::new(&self.plugins_search_dirs, true) + LibLoader::empty() } } } From a0c78df48b90917be063c4e2e56a1cd16499395d Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 26 Apr 2024 14:55:51 +0200 Subject: [PATCH 300/357] removed old bytes.rs --- zenoh/src/bytes.rs | 2082 -------------------------------------------- 1 file changed, 2082 deletions(-) delete mode 100644 zenoh/src/bytes.rs diff --git a/zenoh/src/bytes.rs b/zenoh/src/bytes.rs deleted file mode 100644 index c36136ef81..0000000000 --- a/zenoh/src/bytes.rs +++ /dev/null @@ -1,2082 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// - -//! ZBytes primitives. -use crate::buffers::ZBuf; -use std::{ - borrow::Cow, convert::Infallible, fmt::Debug, marker::PhantomData, ops::Deref, str::Utf8Error, - string::FromUtf8Error, sync::Arc, -}; -use unwrap_infallible::UnwrapInfallible; -use zenoh_buffers::{ - buffer::{Buffer, SplitBuffer}, - reader::HasReader, - writer::HasWriter, - ZBufReader, ZBufWriter, ZSlice, -}; -use zenoh_codec::{RCodec, WCodec, Zenoh080}; -use zenoh_protocol::{core::Properties, zenoh::ext::AttachmentType}; -use zenoh_result::{ZError, ZResult}; -#[cfg(all(feature = "shared-memory", feature = "unstable"))] -use zenoh_shm::{ - api::slice::{ - zsliceshm::{zsliceshm, ZSliceShm}, - zsliceshmmut::{zsliceshmmut, ZSliceShmMut}, - }, - SharedMemoryBuf, -}; - -/// Trait to encode a type `T` into a [`Value`]. -pub trait Serialize { - type Output; - - /// The implementer should take care of serializing the type `T` and set the proper [`Encoding`]. - fn serialize(self, t: T) -> Self::Output; -} - -pub trait Deserialize<'a, T> { - type Input: 'a; - type Error; - - /// The implementer should take care of deserializing the type `T` based on the [`Encoding`] information. - fn deserialize(self, t: Self::Input) -> Result; -} - -/// ZBytes contains the serialized bytes of user data. -#[repr(transparent)] -#[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct ZBytes(ZBuf); - -impl ZBytes { - /// Create an empty ZBytes. - pub const fn empty() -> Self { - Self(ZBuf::empty()) - } - - /// Create a [`ZBytes`] from any type `T` that implements [`Into`]. - pub fn new(t: T) -> Self - where - T: Into, - { - Self(t.into()) - } - - /// Returns wether the ZBytes is empty or not. - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Returns the length of the ZBytes. - pub fn len(&self) -> usize { - self.0.len() - } - - /// Get a [`ZBytesReader`] implementing [`std::io::Read`] trait. - pub fn reader(&self) -> ZBytesReader<'_> { - ZBytesReader(self.0.reader()) - } - - /// Build a [`ZBytes`] from a generic reader implementing [`std::io::Read`]. This operation copies data from the reader. 
- pub fn from_reader(mut reader: R) -> Result - where - R: std::io::Read, - { - let mut buf: Vec = vec![]; - reader.read_to_end(&mut buf)?; - Ok(ZBytes::new(buf)) - } - - /// Get a [`ZBytesWriter`] implementing [`std::io::Write`] trait. - pub fn writer(&mut self) -> ZBytesWriter<'_> { - ZBytesWriter(self.0.writer()) - } - - /// Get a [`ZBytesReader`] implementing [`std::io::Read`] trait. - pub fn iter(&self) -> ZBytesIterator<'_, T> - where - T: for<'b> TryFrom<&'b ZBytes>, - for<'b> ZSerde: Deserialize<'b, T>, - for<'b> >::Error: Debug, - { - ZBytesIterator { - reader: self.0.reader(), - _t: PhantomData::, - } - } - - /// Serialize an object of type `T` as a [`Value`] using the [`ZSerde`]. - /// - /// ```rust - /// use zenoh::bytes::ZBytes; - /// - /// let start = String::from("abc"); - /// let bytes = ZBytes::serialize(start.clone()); - /// let end: String = bytes.deserialize().unwrap(); - /// assert_eq!(start, end); - /// ``` - pub fn serialize(t: T) -> Self - where - ZSerde: Serialize, - { - ZSerde.serialize(t) - } - - /// Deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. - pub fn deserialize<'a, T>(&'a self) -> ZResult - where - ZSerde: Deserialize<'a, T, Input = &'a ZBytes>, - >::Error: Debug, - { - ZSerde - .deserialize(self) - .map_err(|e| zerror!("{:?}", e).into()) - } - - /// Deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. - pub fn deserialize_mut<'a, T>(&'a mut self) -> ZResult - where - ZSerde: Deserialize<'a, T, Input = &'a mut ZBytes>, - >::Error: Debug, - { - ZSerde - .deserialize(self) - .map_err(|e| zerror!("{:?}", e).into()) - } - - /// Infallibly deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. - pub fn into<'a, T>(&'a self) -> T - where - ZSerde: Deserialize<'a, T, Input = &'a ZBytes, Error = Infallible>, - >::Error: Debug, - { - ZSerde.deserialize(self).unwrap_infallible() - } - - /// Infallibly deserialize an object of type `T` from a [`Value`] using the [`ZSerde`]. - pub fn into_mut<'a, T>(&'a mut self) -> T - where - ZSerde: Deserialize<'a, T, Input = &'a mut ZBytes, Error = Infallible>, - >::Error: Debug, - { - ZSerde.deserialize(self).unwrap_infallible() - } -} - -/// A reader that implements [`std::io::Read`] trait to read from a [`ZBytes`]. -#[repr(transparent)] -#[derive(Debug)] -pub struct ZBytesReader<'a>(ZBufReader<'a>); - -impl std::io::Read for ZBytesReader<'_> { - fn read(&mut self, buf: &mut [u8]) -> std::io::Result { - std::io::Read::read(&mut self.0, buf) - } -} - -impl std::io::Seek for ZBytesReader<'_> { - fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result { - std::io::Seek::seek(&mut self.0, pos) - } -} - -/// A writer that implements [`std::io::Write`] trait to write into a [`ZBytes`]. -#[repr(transparent)] -#[derive(Debug)] -pub struct ZBytesWriter<'a>(ZBufWriter<'a>); - -impl std::io::Write for ZBytesWriter<'_> { - fn write(&mut self, buf: &[u8]) -> std::io::Result { - std::io::Write::write(&mut self.0, buf) - } - - fn flush(&mut self) -> std::io::Result<()> { - Ok(()) - } -} - -/// An iterator that implements [`std::iter::Iterator`] trait to iterate on values `T` in a [`ZBytes`]. -/// Note that [`ZBytes`] contains a serialized version of `T` and iterating over a [`ZBytes`] performs lazy deserialization. 
-#[repr(transparent)] -#[derive(Debug)] -pub struct ZBytesIterator<'a, T> -where - ZSerde: Deserialize<'a, T>, -{ - reader: ZBufReader<'a>, - _t: PhantomData, -} - -impl Iterator for ZBytesIterator<'_, T> -where - for<'a> ZSerde: Deserialize<'a, T, Input = &'a ZBytes>, - for<'a> >::Error: Debug, -{ - type Item = T; - - fn next(&mut self) -> Option { - let codec = Zenoh080::new(); - - let kbuf: ZBuf = codec.read(&mut self.reader).ok()?; - let kpld = ZBytes::new(kbuf); - - let t = ZSerde.deserialize(&kpld).ok()?; - Some(t) - } -} - -impl FromIterator for ZBytes -where - ZSerde: Serialize, -{ - fn from_iter>(iter: T) -> Self { - let codec = Zenoh080::new(); - let mut buffer: ZBuf = ZBuf::empty(); - let mut writer = buffer.writer(); - for t in iter { - let tpld = ZSerde.serialize(t); - // SAFETY: we are serializing slices on a ZBuf, so serialization will never - // fail unless we run out of memory. In that case, Rust memory allocator - // will panic before the serializer has any chance to fail. - unsafe { - codec.write(&mut writer, &tpld.0).unwrap_unchecked(); - } - } - - ZBytes::new(buffer) - } -} - -/// Wrapper type for API ergonomicity to allow any type `T` to be converted into `Option` where `T` implements `Into`. -#[repr(transparent)] -#[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct OptionZBytes(Option); - -impl From for OptionZBytes -where - T: Into, -{ - fn from(value: T) -> Self { - Self(Some(value.into())) - } -} - -impl From> for OptionZBytes -where - T: Into, -{ - fn from(mut value: Option) -> Self { - match value.take() { - Some(v) => Self(Some(v.into())), - None => Self(None), - } - } -} - -impl From<&Option> for OptionZBytes -where - for<'a> &'a T: Into, -{ - fn from(value: &Option) -> Self { - match value.as_ref() { - Some(v) => Self(Some(v.into())), - None => Self(None), - } - } -} - -impl From for Option { - fn from(value: OptionZBytes) -> Self { - value.0 - } -} - -/// The default serializer for ZBytes. It supports primitives types, such as: Vec, int, uint, float, string, bool. -/// It also supports common Rust serde values. 
-#[derive(Clone, Copy, Debug)] -pub struct ZSerde; - -#[derive(Debug, Clone, Copy)] -pub struct ZDeserializeError; - -// ZBuf -impl Serialize for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: ZBuf) -> Self::Output { - ZBytes::new(t) - } -} - -impl From for ZBytes { - fn from(t: ZBuf) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&ZBuf> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &ZBuf) -> Self::Output { - ZBytes::new(t.clone()) - } -} - -impl From<&ZBuf> for ZBytes { - fn from(t: &ZBuf) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&mut ZBuf> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &mut ZBuf) -> Self::Output { - ZBytes::new(t.clone()) - } -} - -impl From<&mut ZBuf> for ZBytes { - fn from(t: &mut ZBuf) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Deserialize<'a, ZBuf> for ZSerde { - type Input = &'a ZBytes; - type Error = Infallible; - - fn deserialize(self, v: Self::Input) -> Result { - Ok(v.0.clone()) - } -} - -impl From for ZBuf { - fn from(value: ZBytes) -> Self { - value.0 - } -} - -impl From<&ZBytes> for ZBuf { - fn from(value: &ZBytes) -> Self { - ZSerde.deserialize(value).unwrap_infallible() - } -} - -impl From<&mut ZBytes> for ZBuf { - fn from(value: &mut ZBytes) -> Self { - ZSerde.deserialize(&*value).unwrap_infallible() - } -} - -// ZSlice -impl Serialize for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: ZSlice) -> Self::Output { - ZBytes::new(t) - } -} - -impl From for ZBytes { - fn from(t: ZSlice) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&ZSlice> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &ZSlice) -> Self::Output { - ZBytes::new(t.clone()) - } -} - -impl From<&ZSlice> for ZBytes { - fn from(t: &ZSlice) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&mut ZSlice> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &mut ZSlice) -> Self::Output { - ZBytes::new(t.clone()) - } -} - -impl From<&mut ZSlice> for ZBytes { - fn from(t: &mut ZSlice) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Deserialize<'a, ZSlice> for ZSerde { - type Input = &'a ZBytes; - type Error = Infallible; - - fn deserialize(self, v: Self::Input) -> Result { - Ok(v.0.to_zslice()) - } -} - -impl From for ZSlice { - fn from(value: ZBytes) -> Self { - ZBuf::from(value).to_zslice() - } -} - -impl From<&ZBytes> for ZSlice { - fn from(value: &ZBytes) -> Self { - ZSerde.deserialize(value).unwrap_infallible() - } -} - -impl From<&mut ZBytes> for ZSlice { - fn from(value: &mut ZBytes) -> Self { - ZSerde.deserialize(&*value).unwrap_infallible() - } -} - -// [u8; N] -impl Serialize<[u8; N]> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: [u8; N]) -> Self::Output { - ZBytes::new(t) - } -} - -impl From<[u8; N]> for ZBytes { - fn from(t: [u8; N]) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&[u8; N]> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &[u8; N]) -> Self::Output { - ZBytes::new(*t) - } -} - -impl From<&[u8; N]> for ZBytes { - fn from(t: &[u8; N]) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&mut [u8; N]> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &mut [u8; N]) -> Self::Output { - ZBytes::new(*t) - } -} - -impl From<&mut [u8; N]> for ZBytes { - fn from(t: &mut [u8; N]) -> Self { - ZSerde.serialize(*t) - } -} - -impl<'a, const N: usize> Deserialize<'a, [u8; N]> for ZSerde { - type Input = &'a ZBytes; - type Error = ZDeserializeError; - - fn deserialize(self, v: Self::Input) -> Result<[u8; 
N], Self::Error> { - use std::io::Read; - - if v.0.len() != N { - return Err(ZDeserializeError); - } - let mut dst = [0u8; N]; - let mut reader = v.reader(); - reader.read_exact(&mut dst).map_err(|_| ZDeserializeError)?; - Ok(dst) - } -} - -impl TryFrom for [u8; N] { - type Error = ZDeserializeError; - - fn try_from(value: ZBytes) -> Result { - ZSerde.deserialize(&value) - } -} - -impl TryFrom<&ZBytes> for [u8; N] { - type Error = ZDeserializeError; - - fn try_from(value: &ZBytes) -> Result { - ZSerde.deserialize(value) - } -} - -impl TryFrom<&mut ZBytes> for [u8; N] { - type Error = ZDeserializeError; - - fn try_from(value: &mut ZBytes) -> Result { - ZSerde.deserialize(&*value) - } -} - -// Vec -impl Serialize> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: Vec) -> Self::Output { - ZBytes::new(t) - } -} - -impl From> for ZBytes { - fn from(t: Vec) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&Vec> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &Vec) -> Self::Output { - ZBytes::new(t.clone()) - } -} - -impl From<&Vec> for ZBytes { - fn from(t: &Vec) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&mut Vec> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &mut Vec) -> Self::Output { - ZBytes::new(t.clone()) - } -} - -impl From<&mut Vec> for ZBytes { - fn from(t: &mut Vec) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Deserialize<'a, Vec> for ZSerde { - type Input = &'a ZBytes; - type Error = Infallible; - - fn deserialize(self, v: Self::Input) -> Result, Self::Error> { - Ok(v.0.contiguous().to_vec()) - } -} - -impl From for Vec { - fn from(value: ZBytes) -> Self { - ZSerde.deserialize(&value).unwrap_infallible() - } -} - -impl From<&ZBytes> for Vec { - fn from(value: &ZBytes) -> Self { - ZSerde.deserialize(value).unwrap_infallible() - } -} - -impl From<&mut ZBytes> for Vec { - fn from(value: &mut ZBytes) -> Self { - ZSerde.deserialize(&*value).unwrap_infallible() - } -} - -// &[u8] -impl Serialize<&[u8]> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &[u8]) -> Self::Output { - ZBytes::new(t.to_vec()) - } -} - -impl From<&[u8]> for ZBytes { - fn from(t: &[u8]) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&mut [u8]> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &mut [u8]) -> Self::Output { - ZSerde.serialize(&*t) - } -} - -impl From<&mut [u8]> for ZBytes { - fn from(t: &mut [u8]) -> Self { - ZSerde.serialize(t) - } -} - -// Cow<[u8]> -impl<'a> Serialize> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: Cow<'a, [u8]>) -> Self::Output { - ZBytes::new(t.to_vec()) - } -} - -impl From> for ZBytes { - fn from(t: Cow<'_, [u8]>) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Serialize<&Cow<'a, [u8]>> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &Cow<'a, [u8]>) -> Self::Output { - ZBytes::new(t.to_vec()) - } -} - -impl From<&Cow<'_, [u8]>> for ZBytes { - fn from(t: &Cow<'_, [u8]>) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Serialize<&mut Cow<'a, [u8]>> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &mut Cow<'a, [u8]>) -> Self::Output { - ZSerde.serialize(&*t) - } -} - -impl From<&mut Cow<'_, [u8]>> for ZBytes { - fn from(t: &mut Cow<'_, [u8]>) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Deserialize<'a, Cow<'a, [u8]>> for ZSerde { - type Input = &'a ZBytes; - type Error = Infallible; - - fn deserialize(self, v: Self::Input) -> Result, Self::Error> { - Ok(v.0.contiguous()) - } -} - -impl From for Cow<'static, [u8]> { - fn 
from(v: ZBytes) -> Self { - match v.0.contiguous() { - Cow::Borrowed(s) => Cow::Owned(s.to_vec()), - Cow::Owned(s) => Cow::Owned(s), - } - } -} - -impl<'a> From<&'a ZBytes> for Cow<'a, [u8]> { - fn from(value: &'a ZBytes) -> Self { - ZSerde.deserialize(value).unwrap_infallible() - } -} - -impl<'a> From<&'a mut ZBytes> for Cow<'a, [u8]> { - fn from(value: &'a mut ZBytes) -> Self { - ZSerde.deserialize(&*value).unwrap_infallible() - } -} - -// String -impl Serialize for ZSerde { - type Output = ZBytes; - - fn serialize(self, s: String) -> Self::Output { - ZBytes::new(s.into_bytes()) - } -} - -impl From for ZBytes { - fn from(t: String) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&String> for ZSerde { - type Output = ZBytes; - - fn serialize(self, s: &String) -> Self::Output { - ZBytes::new(s.clone().into_bytes()) - } -} - -impl From<&String> for ZBytes { - fn from(t: &String) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&mut String> for ZSerde { - type Output = ZBytes; - - fn serialize(self, s: &mut String) -> Self::Output { - ZSerde.serialize(&*s) - } -} - -impl From<&mut String> for ZBytes { - fn from(t: &mut String) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Deserialize<'a, String> for ZSerde { - type Input = &'a ZBytes; - type Error = FromUtf8Error; - - fn deserialize(self, v: Self::Input) -> Result { - let v: Vec = ZSerde.deserialize(v).unwrap_infallible(); - String::from_utf8(v) - } -} - -impl TryFrom for String { - type Error = FromUtf8Error; - - fn try_from(value: ZBytes) -> Result { - ZSerde.deserialize(&value) - } -} - -impl TryFrom<&ZBytes> for String { - type Error = FromUtf8Error; - - fn try_from(value: &ZBytes) -> Result { - ZSerde.deserialize(value) - } -} - -impl TryFrom<&mut ZBytes> for String { - type Error = FromUtf8Error; - - fn try_from(value: &mut ZBytes) -> Result { - ZSerde.deserialize(&*value) - } -} - -// &str -impl Serialize<&str> for ZSerde { - type Output = ZBytes; - - fn serialize(self, s: &str) -> Self::Output { - ZSerde.serialize(s.to_string()) - } -} - -impl From<&str> for ZBytes { - fn from(t: &str) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&mut str> for ZSerde { - type Output = ZBytes; - - fn serialize(self, s: &mut str) -> Self::Output { - ZSerde.serialize(&*s) - } -} - -impl From<&mut str> for ZBytes { - fn from(t: &mut str) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Serialize> for ZSerde { - type Output = ZBytes; - - fn serialize(self, s: Cow<'a, str>) -> Self::Output { - Self.serialize(s.to_string()) - } -} - -impl From> for ZBytes { - fn from(t: Cow<'_, str>) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Serialize<&Cow<'a, str>> for ZSerde { - type Output = ZBytes; - - fn serialize(self, s: &Cow<'a, str>) -> Self::Output { - ZSerde.serialize(s.to_string()) - } -} - -impl From<&Cow<'_, str>> for ZBytes { - fn from(t: &Cow<'_, str>) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Serialize<&mut Cow<'a, str>> for ZSerde { - type Output = ZBytes; - - fn serialize(self, s: &mut Cow<'a, str>) -> Self::Output { - ZSerde.serialize(&*s) - } -} - -impl From<&mut Cow<'_, str>> for ZBytes { - fn from(t: &mut Cow<'_, str>) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Deserialize<'a, Cow<'a, str>> for ZSerde { - type Input = &'a ZBytes; - type Error = Utf8Error; - - fn deserialize(self, v: Self::Input) -> Result, Self::Error> { - Cow::try_from(v) - } -} - -impl TryFrom for Cow<'static, str> { - type Error = Utf8Error; - - fn try_from(v: ZBytes) -> Result { - let v: Cow<'static, [u8]> = 
Cow::from(v); - let _ = core::str::from_utf8(v.as_ref())?; - // SAFETY: &str is &[u8] with the guarantee that every char is UTF-8 - // As implemented internally https://doc.rust-lang.org/std/str/fn.from_utf8_unchecked.html. - Ok(unsafe { core::mem::transmute(v) }) - } -} - -impl<'a> TryFrom<&'a ZBytes> for Cow<'a, str> { - type Error = Utf8Error; - - fn try_from(v: &'a ZBytes) -> Result { - let v: Cow<'a, [u8]> = Cow::from(v); - let _ = core::str::from_utf8(v.as_ref())?; - // SAFETY: &str is &[u8] with the guarantee that every char is UTF-8 - // As implemented internally https://doc.rust-lang.org/std/str/fn.from_utf8_unchecked.html. - Ok(unsafe { core::mem::transmute(v) }) - } -} - -impl<'a> TryFrom<&'a mut ZBytes> for Cow<'a, str> { - type Error = Utf8Error; - - fn try_from(v: &'a mut ZBytes) -> Result { - let v: Cow<'a, [u8]> = Cow::from(v); - let _ = core::str::from_utf8(v.as_ref())?; - // SAFETY: &str is &[u8] with the guarantee that every char is UTF-8 - // As implemented internally https://doc.rust-lang.org/std/str/fn.from_utf8_unchecked.html. - Ok(unsafe { core::mem::transmute(v) }) - } -} - -// - Integers impl -macro_rules! impl_int { - ($t:ty) => { - impl Serialize<$t> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: $t) -> Self::Output { - let bs = t.to_le_bytes(); - let mut end = 1; - if t != 0 as $t { - end += bs.iter().rposition(|b| *b != 0).unwrap_or(bs.len() - 1); - }; - // SAFETY: - // - 0 is a valid start index because bs is guaranteed to always have a length greater or equal than 1 - // - end is a valid end index because is bounded between 0 and bs.len() - ZBytes::new(unsafe { ZSlice::new_unchecked(Arc::new(bs), 0, end) }) - } - } - - impl From<$t> for ZBytes { - fn from(t: $t) -> Self { - ZSerde.serialize(t) - } - } - - impl Serialize<&$t> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &$t) -> Self::Output { - Self.serialize(*t) - } - } - - impl From<&$t> for ZBytes { - fn from(t: &$t) -> Self { - ZSerde.serialize(t) - } - } - - impl Serialize<&mut $t> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &mut $t) -> Self::Output { - Self.serialize(*t) - } - } - - impl From<&mut $t> for ZBytes { - fn from(t: &mut $t) -> Self { - ZSerde.serialize(t) - } - } - - impl<'a> Deserialize<'a, $t> for ZSerde { - type Input = &'a ZBytes; - type Error = ZDeserializeError; - - fn deserialize(self, v: Self::Input) -> Result<$t, Self::Error> { - use std::io::Read; - - let mut r = v.reader(); - let mut bs = (0 as $t).to_le_bytes(); - if v.len() > bs.len() { - return Err(ZDeserializeError); - } - r.read_exact(&mut bs[..v.len()]) - .map_err(|_| ZDeserializeError)?; - let t = <$t>::from_le_bytes(bs); - Ok(t) - } - } - - impl TryFrom for $t { - type Error = ZDeserializeError; - - fn try_from(value: ZBytes) -> Result { - ZSerde.deserialize(&value) - } - } - - impl TryFrom<&ZBytes> for $t { - type Error = ZDeserializeError; - - fn try_from(value: &ZBytes) -> Result { - ZSerde.deserialize(value) - } - } - - impl TryFrom<&mut ZBytes> for $t { - type Error = ZDeserializeError; - - fn try_from(value: &mut ZBytes) -> Result { - ZSerde.deserialize(&*value) - } - } - }; -} - -// Zenoh unsigned integers -impl_int!(u8); -impl_int!(u16); -impl_int!(u32); -impl_int!(u64); -impl_int!(usize); - -// Zenoh signed integers -impl_int!(i8); -impl_int!(i16); -impl_int!(i32); -impl_int!(i64); -impl_int!(isize); - -// Zenoh floats -impl_int!(f32); -impl_int!(f64); - -// Zenoh bool -impl Serialize for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: bool) -> 
Self::Output { - // SAFETY: casting a bool into an integer is well-defined behaviour. - // 0 is false, 1 is true: https://doc.rust-lang.org/std/primitive.bool.html - ZBytes::new(ZBuf::from((t as u8).to_le_bytes())) - } -} - -impl From for ZBytes { - fn from(t: bool) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&bool> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &bool) -> Self::Output { - ZSerde.serialize(*t) - } -} - -impl From<&bool> for ZBytes { - fn from(t: &bool) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&mut bool> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &mut bool) -> Self::Output { - ZSerde.serialize(*t) - } -} - -impl From<&mut bool> for ZBytes { - fn from(t: &mut bool) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Deserialize<'a, bool> for ZSerde { - type Input = &'a ZBytes; - type Error = ZDeserializeError; - - fn deserialize(self, v: Self::Input) -> Result { - let p = v.deserialize::().map_err(|_| ZDeserializeError)?; - match p { - 0 => Ok(false), - 1 => Ok(true), - _ => Err(ZDeserializeError), - } - } -} - -impl TryFrom for bool { - type Error = ZDeserializeError; - - fn try_from(value: ZBytes) -> Result { - ZSerde.deserialize(&value) - } -} - -impl TryFrom<&ZBytes> for bool { - type Error = ZDeserializeError; - - fn try_from(value: &ZBytes) -> Result { - ZSerde.deserialize(value) - } -} - -impl TryFrom<&mut ZBytes> for bool { - type Error = ZDeserializeError; - - fn try_from(value: &mut ZBytes) -> Result { - ZSerde.deserialize(&*value) - } -} - -// - Zenoh advanced types encoders/decoders -// Properties -impl Serialize> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: Properties<'_>) -> Self::Output { - Self.serialize(t.as_str()) - } -} - -impl From> for ZBytes { - fn from(t: Properties<'_>) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&Properties<'_>> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &Properties<'_>) -> Self::Output { - Self.serialize(t.as_str()) - } -} - -impl<'s> From<&'s Properties<'s>> for ZBytes { - fn from(t: &'s Properties<'s>) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&mut Properties<'_>> for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: &mut Properties<'_>) -> Self::Output { - Self.serialize(t.as_str()) - } -} - -impl<'s> From<&'s mut Properties<'s>> for ZBytes { - fn from(t: &'s mut Properties<'s>) -> Self { - ZSerde.serialize(&*t) - } -} - -impl<'s> Deserialize<'s, Properties<'s>> for ZSerde { - type Input = &'s ZBytes; - type Error = ZDeserializeError; - - fn deserialize(self, v: Self::Input) -> Result, Self::Error> { - let s = v - .deserialize::>() - .map_err(|_| ZDeserializeError)?; - Ok(Properties::from(s)) - } -} - -impl TryFrom for Properties<'static> { - type Error = ZDeserializeError; - - fn try_from(v: ZBytes) -> Result { - let s = v.deserialize::>().map_err(|_| ZDeserializeError)?; - Ok(Properties::from(s.into_owned())) - } -} - -impl<'s> TryFrom<&'s ZBytes> for Properties<'s> { - type Error = ZDeserializeError; - - fn try_from(value: &'s ZBytes) -> Result { - ZSerde.deserialize(value) - } -} - -impl<'s> TryFrom<&'s mut ZBytes> for Properties<'s> { - type Error = ZDeserializeError; - - fn try_from(value: &'s mut ZBytes) -> Result { - ZSerde.deserialize(&*value) - } -} - -// JSON -impl Serialize for ZSerde { - type Output = Result; - - fn serialize(self, t: serde_json::Value) -> Self::Output { - ZSerde.serialize(&t) - } -} - -impl TryFrom for ZBytes { - type Error = serde_json::Error; - - fn 
try_from(value: serde_json::Value) -> Result { - ZSerde.serialize(&value) - } -} - -impl Serialize<&serde_json::Value> for ZSerde { - type Output = Result; - - fn serialize(self, t: &serde_json::Value) -> Self::Output { - let mut bytes = ZBytes::empty(); - serde_json::to_writer(bytes.writer(), t)?; - Ok(bytes) - } -} - -impl TryFrom<&serde_json::Value> for ZBytes { - type Error = serde_json::Error; - - fn try_from(value: &serde_json::Value) -> Result { - ZSerde.serialize(value) - } -} - -impl Serialize<&mut serde_json::Value> for ZSerde { - type Output = Result; - - fn serialize(self, t: &mut serde_json::Value) -> Self::Output { - let mut bytes = ZBytes::empty(); - serde_json::to_writer(bytes.writer(), t)?; - Ok(bytes) - } -} - -impl TryFrom<&mut serde_json::Value> for ZBytes { - type Error = serde_json::Error; - - fn try_from(value: &mut serde_json::Value) -> Result { - ZSerde.serialize(&*value) - } -} - -impl<'a> Deserialize<'a, serde_json::Value> for ZSerde { - type Input = &'a ZBytes; - type Error = serde_json::Error; - - fn deserialize(self, v: Self::Input) -> Result { - serde_json::from_reader(v.reader()) - } -} - -impl TryFrom for serde_json::Value { - type Error = serde_json::Error; - - fn try_from(value: ZBytes) -> Result { - ZSerde.deserialize(&value) - } -} - -impl TryFrom<&ZBytes> for serde_json::Value { - type Error = serde_json::Error; - - fn try_from(value: &ZBytes) -> Result { - ZSerde.deserialize(value) - } -} - -impl TryFrom<&mut ZBytes> for serde_json::Value { - type Error = serde_json::Error; - - fn try_from(value: &mut ZBytes) -> Result { - ZSerde.deserialize(&*value) - } -} - -// Yaml -impl Serialize for ZSerde { - type Output = Result; - - fn serialize(self, t: serde_yaml::Value) -> Self::Output { - Self.serialize(&t) - } -} - -impl TryFrom for ZBytes { - type Error = serde_yaml::Error; - - fn try_from(value: serde_yaml::Value) -> Result { - ZSerde.serialize(value) - } -} - -impl Serialize<&serde_yaml::Value> for ZSerde { - type Output = Result; - - fn serialize(self, t: &serde_yaml::Value) -> Self::Output { - let mut bytes = ZBytes::empty(); - serde_yaml::to_writer(bytes.writer(), t)?; - Ok(bytes) - } -} - -impl TryFrom<&serde_yaml::Value> for ZBytes { - type Error = serde_yaml::Error; - - fn try_from(value: &serde_yaml::Value) -> Result { - ZSerde.serialize(value) - } -} - -impl Serialize<&mut serde_yaml::Value> for ZSerde { - type Output = Result; - - fn serialize(self, t: &mut serde_yaml::Value) -> Self::Output { - let mut bytes = ZBytes::empty(); - serde_yaml::to_writer(bytes.writer(), t)?; - Ok(bytes) - } -} - -impl TryFrom<&mut serde_yaml::Value> for ZBytes { - type Error = serde_yaml::Error; - - fn try_from(value: &mut serde_yaml::Value) -> Result { - ZSerde.serialize(value) - } -} - -impl<'a> Deserialize<'a, serde_yaml::Value> for ZSerde { - type Input = &'a ZBytes; - type Error = serde_yaml::Error; - - fn deserialize(self, v: Self::Input) -> Result { - serde_yaml::from_reader(v.reader()) - } -} - -impl TryFrom for serde_yaml::Value { - type Error = serde_yaml::Error; - - fn try_from(value: ZBytes) -> Result { - ZSerde.deserialize(&value) - } -} - -impl TryFrom<&ZBytes> for serde_yaml::Value { - type Error = serde_yaml::Error; - - fn try_from(value: &ZBytes) -> Result { - ZSerde.deserialize(value) - } -} - -impl TryFrom<&mut ZBytes> for serde_yaml::Value { - type Error = serde_yaml::Error; - - fn try_from(value: &mut ZBytes) -> Result { - ZSerde.deserialize(&*value) - } -} - -// CBOR -impl Serialize for ZSerde { - type Output = Result; - - fn serialize(self, 
t: serde_cbor::Value) -> Self::Output { - Self.serialize(&t) - } -} - -impl TryFrom for ZBytes { - type Error = serde_cbor::Error; - - fn try_from(value: serde_cbor::Value) -> Result { - ZSerde.serialize(value) - } -} - -impl Serialize<&serde_cbor::Value> for ZSerde { - type Output = Result; - - fn serialize(self, t: &serde_cbor::Value) -> Self::Output { - let mut bytes = ZBytes::empty(); - serde_cbor::to_writer(bytes.0.writer(), t)?; - Ok(bytes) - } -} - -impl TryFrom<&serde_cbor::Value> for ZBytes { - type Error = serde_cbor::Error; - - fn try_from(value: &serde_cbor::Value) -> Result { - ZSerde.serialize(value) - } -} - -impl Serialize<&mut serde_cbor::Value> for ZSerde { - type Output = Result; - - fn serialize(self, t: &mut serde_cbor::Value) -> Self::Output { - ZSerde.serialize(&*t) - } -} - -impl TryFrom<&mut serde_cbor::Value> for ZBytes { - type Error = serde_cbor::Error; - - fn try_from(value: &mut serde_cbor::Value) -> Result { - ZSerde.serialize(value) - } -} - -impl<'a> Deserialize<'a, serde_cbor::Value> for ZSerde { - type Input = &'a ZBytes; - type Error = serde_cbor::Error; - - fn deserialize(self, v: Self::Input) -> Result { - serde_cbor::from_reader(v.reader()) - } -} - -impl TryFrom for serde_cbor::Value { - type Error = serde_cbor::Error; - - fn try_from(value: ZBytes) -> Result { - ZSerde.deserialize(&value) - } -} - -impl TryFrom<&ZBytes> for serde_cbor::Value { - type Error = serde_cbor::Error; - - fn try_from(value: &ZBytes) -> Result { - ZSerde.deserialize(value) - } -} - -impl TryFrom<&mut ZBytes> for serde_cbor::Value { - type Error = serde_cbor::Error; - - fn try_from(value: &mut ZBytes) -> Result { - ZSerde.deserialize(&*value) - } -} - -// Pickle -impl Serialize for ZSerde { - type Output = Result; - - fn serialize(self, t: serde_pickle::Value) -> Self::Output { - Self.serialize(&t) - } -} - -impl TryFrom for ZBytes { - type Error = serde_pickle::Error; - - fn try_from(value: serde_pickle::Value) -> Result { - ZSerde.serialize(value) - } -} - -impl Serialize<&serde_pickle::Value> for ZSerde { - type Output = Result; - - fn serialize(self, t: &serde_pickle::Value) -> Self::Output { - let mut bytes = ZBytes::empty(); - serde_pickle::value_to_writer( - &mut bytes.0.writer(), - t, - serde_pickle::SerOptions::default(), - )?; - Ok(bytes) - } -} - -impl TryFrom<&serde_pickle::Value> for ZBytes { - type Error = serde_pickle::Error; - - fn try_from(value: &serde_pickle::Value) -> Result { - ZSerde.serialize(value) - } -} - -impl Serialize<&mut serde_pickle::Value> for ZSerde { - type Output = Result; - - fn serialize(self, t: &mut serde_pickle::Value) -> Self::Output { - ZSerde.serialize(&*t) - } -} - -impl TryFrom<&mut serde_pickle::Value> for ZBytes { - type Error = serde_pickle::Error; - - fn try_from(value: &mut serde_pickle::Value) -> Result { - ZSerde.serialize(value) - } -} - -impl<'a> Deserialize<'a, serde_pickle::Value> for ZSerde { - type Input = &'a ZBytes; - type Error = serde_pickle::Error; - - fn deserialize(self, v: Self::Input) -> Result { - serde_pickle::value_from_reader(v.reader(), serde_pickle::DeOptions::default()) - } -} - -impl TryFrom for serde_pickle::Value { - type Error = serde_pickle::Error; - - fn try_from(value: ZBytes) -> Result { - ZSerde.deserialize(&value) - } -} - -impl TryFrom<&ZBytes> for serde_pickle::Value { - type Error = serde_pickle::Error; - - fn try_from(value: &ZBytes) -> Result { - ZSerde.deserialize(value) - } -} - -impl TryFrom<&mut ZBytes> for serde_pickle::Value { - type Error = serde_pickle::Error; - - fn 
try_from(value: &mut ZBytes) -> Result { - ZSerde.deserialize(&*value) - } -} - -// Shared memory conversion -#[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl Serialize for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: ZSliceShm) -> Self::Output { - let slice: ZSlice = t.into(); - ZBytes::new(slice) - } -} - -#[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl From for ZBytes { - fn from(t: ZSliceShm) -> Self { - ZSerde.serialize(t) - } -} - -// Shared memory conversion -#[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl Serialize for ZSerde { - type Output = ZBytes; - - fn serialize(self, t: ZSliceShmMut) -> Self::Output { - let slice: ZSlice = t.into(); - ZBytes::new(slice) - } -} - -#[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl From for ZBytes { - fn from(t: ZSliceShmMut) -> Self { - ZSerde.serialize(t) - } -} - -#[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> Deserialize<'a, &'a zsliceshm> for ZSerde { - type Input = &'a ZBytes; - type Error = ZDeserializeError; - - fn deserialize(self, v: Self::Input) -> Result<&'a zsliceshm, Self::Error> { - // A ZSliceShm is expected to have only one slice - let mut zslices = v.0.zslices(); - if let Some(zs) = zslices.next() { - if let Some(shmb) = zs.downcast_ref::() { - return Ok(shmb.into()); - } - } - Err(ZDeserializeError) - } -} - -#[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> TryFrom<&'a ZBytes> for &'a zsliceshm { - type Error = ZDeserializeError; - - fn try_from(value: &'a ZBytes) -> Result { - ZSerde.deserialize(value) - } -} - -#[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zsliceshm { - type Error = ZDeserializeError; - - fn try_from(value: &'a mut ZBytes) -> Result { - ZSerde.deserialize(value) - } -} - -#[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> Deserialize<'a, &'a mut zsliceshm> for ZSerde { - type Input = &'a mut ZBytes; - type Error = ZDeserializeError; - - fn deserialize(self, v: Self::Input) -> Result<&'a mut zsliceshm, Self::Error> { - // A ZSliceShmBorrowMut is expected to have only one slice - let mut zslices = v.0.zslices_mut(); - if let Some(zs) = zslices.next() { - if let Some(shmb) = zs.downcast_mut::() { - return Ok(shmb.into()); - } - } - Err(ZDeserializeError) - } -} - -#[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> Deserialize<'a, &'a mut zsliceshmmut> for ZSerde { - type Input = &'a mut ZBytes; - type Error = ZDeserializeError; - - fn deserialize(self, v: Self::Input) -> Result<&'a mut zsliceshmmut, Self::Error> { - // A ZSliceShmBorrowMut is expected to have only one slice - let mut zslices = v.0.zslices_mut(); - if let Some(zs) = zslices.next() { - if let Some(shmb) = zs.downcast_mut::() { - return shmb.try_into().map_err(|_| ZDeserializeError); - } - } - Err(ZDeserializeError) - } -} - -#[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zsliceshmmut { - type Error = ZDeserializeError; - - fn try_from(value: &'a mut ZBytes) -> Result { - ZSerde.deserialize(value) - } -} - -// Tuple -macro_rules! 
impl_tuple { - ($t:expr) => {{ - let (a, b) = $t; - - let codec = Zenoh080::new(); - let mut buffer: ZBuf = ZBuf::empty(); - let mut writer = buffer.writer(); - let apld: ZBytes = a.into(); - let bpld: ZBytes = b.into(); - - // SAFETY: we are serializing slices on a ZBuf, so serialization will never - // fail unless we run out of memory. In that case, Rust memory allocator - // will panic before the serializer has any chance to fail. - unsafe { - codec.write(&mut writer, &apld.0).unwrap_unchecked(); - codec.write(&mut writer, &bpld.0).unwrap_unchecked(); - } - - ZBytes::new(buffer) - }}; -} -impl Serialize<(A, B)> for ZSerde -where - A: Into, - B: Into, -{ - type Output = ZBytes; - - fn serialize(self, t: (A, B)) -> Self::Output { - impl_tuple!(t) - } -} - -impl Serialize<&(A, B)> for ZSerde -where - for<'a> &'a A: Into, - for<'b> &'b B: Into, -{ - type Output = ZBytes; - - fn serialize(self, t: &(A, B)) -> Self::Output { - impl_tuple!(t) - } -} - -impl From<(A, B)> for ZBytes -where - A: Into, - B: Into, -{ - fn from(value: (A, B)) -> Self { - ZSerde.serialize(value) - } -} - -impl<'s, A, B> Deserialize<'s, (A, B)> for ZSerde -where - A: TryFrom + 'static, - >::Error: Debug + 'static, - B: TryFrom + 'static, - >::Error: Debug + 'static, -{ - type Input = &'s ZBytes; - type Error = ZError; - - fn deserialize(self, bytes: Self::Input) -> Result<(A, B), Self::Error> { - let codec = Zenoh080::new(); - let mut reader = bytes.0.reader(); - - let abuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; - let apld = ZBytes::new(abuf); - - let bbuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; - let bpld = ZBytes::new(bbuf); - - let a = A::try_from(apld).map_err(|e| zerror!("{:?}", e))?; - let b = B::try_from(bpld).map_err(|e| zerror!("{:?}", e))?; - Ok((a, b)) - } -} - -impl TryFrom for (A, B) -where - A: TryFrom + 'static, - >::Error: Debug + 'static, - B: TryFrom + 'static, - >::Error: Debug + 'static, -{ - type Error = ZError; - - fn try_from(value: ZBytes) -> Result { - ZSerde.deserialize(&value) - } -} - -impl TryFrom<&ZBytes> for (A, B) -where - A: TryFrom + 'static, - >::Error: Debug + 'static, - B: TryFrom + 'static, - >::Error: Debug + 'static, -{ - type Error = ZError; - - fn try_from(value: &ZBytes) -> Result { - ZSerde.deserialize(value) - } -} - -impl TryFrom<&mut ZBytes> for (A, B) -where - A: TryFrom + 'static, - >::Error: Debug + 'static, - B: TryFrom + 'static, - >::Error: Debug + 'static, -{ - type Error = ZError; - - fn try_from(value: &mut ZBytes) -> Result { - ZSerde.deserialize(&*value) - } -} - -// For convenience to always convert a Value in the examples -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum StringOrBase64 { - String(String), - Base64(String), -} - -impl StringOrBase64 { - pub fn into_string(self) -> String { - match self { - StringOrBase64::String(s) | StringOrBase64::Base64(s) => s, - } - } -} - -impl Deref for StringOrBase64 { - type Target = String; - - fn deref(&self) -> &Self::Target { - match self { - Self::String(s) | Self::Base64(s) => s, - } - } -} - -impl std::fmt::Display for StringOrBase64 { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_str(self) - } -} - -impl From<&ZBytes> for StringOrBase64 { - fn from(v: &ZBytes) -> Self { - use base64::{engine::general_purpose::STANDARD as b64_std_engine, Engine}; - match v.deserialize::() { - Ok(s) => StringOrBase64::String(s), - Err(_) => StringOrBase64::Base64(b64_std_engine.encode(v.into::>())), - } - } -} - -impl From<&mut ZBytes> for 
StringOrBase64 { - fn from(v: &mut ZBytes) -> Self { - StringOrBase64::from(&*v) - } -} - -// Protocol attachment extension -impl From for AttachmentType { - fn from(this: ZBytes) -> Self { - AttachmentType { - buffer: this.into(), - } - } -} - -impl From> for ZBytes { - fn from(this: AttachmentType) -> Self { - this.buffer.into() - } -} - -mod tests { - #[test] - fn serializer() { - use super::ZBytes; - use rand::Rng; - use std::borrow::Cow; - use zenoh_buffers::{ZBuf, ZSlice}; - use zenoh_protocol::core::Properties; - - #[cfg(all(feature = "shared-memory", feature = "unstable"))] - use zenoh_shm::api::{ - protocol_implementations::posix::{ - posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, - protocol_id::POSIX_PROTOCOL_ID, - }, - provider::shared_memory_provider::SharedMemoryProviderBuilder, - slice::zsliceshm::{zsliceshm, ZSliceShm}, - }; - - const NUM: usize = 1_000; - - macro_rules! serialize_deserialize { - ($t:ty, $in:expr) => { - let i = $in; - let t = i.clone(); - println!("Serialize:\t{:?}", t); - let v = ZBytes::serialize(t); - println!("Deserialize:\t{:?}", v); - let o: $t = v.deserialize().unwrap(); - assert_eq!(i, o); - println!(""); - }; - } - - // WARN: test function body produces stack overflow, so I split it into subroutines - #[inline(never)] - fn numeric() { - let mut rng = rand::thread_rng(); - - // unsigned integer - serialize_deserialize!(u8, u8::MIN); - serialize_deserialize!(u16, u16::MIN); - serialize_deserialize!(u32, u32::MIN); - serialize_deserialize!(u64, u64::MIN); - serialize_deserialize!(usize, usize::MIN); - - serialize_deserialize!(u8, u8::MAX); - serialize_deserialize!(u16, u16::MAX); - serialize_deserialize!(u32, u32::MAX); - serialize_deserialize!(u64, u64::MAX); - serialize_deserialize!(usize, usize::MAX); - - for _ in 0..NUM { - serialize_deserialize!(u8, rng.gen::()); - serialize_deserialize!(u16, rng.gen::()); - serialize_deserialize!(u32, rng.gen::()); - serialize_deserialize!(u64, rng.gen::()); - serialize_deserialize!(usize, rng.gen::()); - } - - // signed integer - serialize_deserialize!(i8, i8::MIN); - serialize_deserialize!(i16, i16::MIN); - serialize_deserialize!(i32, i32::MIN); - serialize_deserialize!(i64, i64::MIN); - serialize_deserialize!(isize, isize::MIN); - - serialize_deserialize!(i8, i8::MAX); - serialize_deserialize!(i16, i16::MAX); - serialize_deserialize!(i32, i32::MAX); - serialize_deserialize!(i64, i64::MAX); - serialize_deserialize!(isize, isize::MAX); - - for _ in 0..NUM { - serialize_deserialize!(i8, rng.gen::()); - serialize_deserialize!(i16, rng.gen::()); - serialize_deserialize!(i32, rng.gen::()); - serialize_deserialize!(i64, rng.gen::()); - serialize_deserialize!(isize, rng.gen::()); - } - - // float - serialize_deserialize!(f32, f32::MIN); - serialize_deserialize!(f64, f64::MIN); - - serialize_deserialize!(f32, f32::MAX); - serialize_deserialize!(f64, f64::MAX); - - for _ in 0..NUM { - serialize_deserialize!(f32, rng.gen::()); - serialize_deserialize!(f64, rng.gen::()); - } - } - numeric(); - - // WARN: test function body produces stack overflow, so I split it into subroutines - #[inline(never)] - fn basic() { - // String - serialize_deserialize!(String, ""); - serialize_deserialize!(String, String::from("abcdef")); - - // Cow - serialize_deserialize!(Cow, Cow::from("")); - serialize_deserialize!(Cow, Cow::from(String::from("abcdef"))); - - // Vec - serialize_deserialize!(Vec, vec![0u8; 0]); - serialize_deserialize!(Vec, vec![0u8; 64]); - - // Cow<[u8]> - serialize_deserialize!(Cow<[u8]>, 
Cow::from(vec![0u8; 0])); - serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 64])); - - // ZBuf - serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 0])); - serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 64])); - } - basic(); - - // SHM - #[cfg(all(feature = "shared-memory", feature = "unstable"))] - { - // create an SHM backend... - let backend = PosixSharedMemoryProviderBackend::builder() - .with_size(4096) - .unwrap() - .res() - .unwrap(); - // ...and an SHM provider - let provider = SharedMemoryProviderBuilder::builder() - .protocol_id::() - .backend(backend) - .res(); - - // Prepare a layout for allocations - let layout = provider.alloc_layout().size(1024).res().unwrap(); - - // allocate an SHM buffer - let mutable_shm_buf = layout.alloc().res().unwrap(); - - // convert to immutable SHM buffer - let immutable_shm_buf: ZSliceShm = mutable_shm_buf.into(); - - serialize_deserialize!(&zsliceshm, immutable_shm_buf); - } - - // Properties - serialize_deserialize!(Properties, Properties::from("")); - serialize_deserialize!(Properties, Properties::from("a=1;b=2;c3")); - - // Tuple - serialize_deserialize!((usize, usize), (0, 1)); - serialize_deserialize!((usize, String), (0, String::from("a"))); - serialize_deserialize!((String, String), (String::from("a"), String::from("b"))); - serialize_deserialize!( - (Cow<'static, [u8]>, Cow<'static, [u8]>), - (Cow::from(vec![0u8; 8]), Cow::from(vec![0u8; 8])) - ); - serialize_deserialize!( - (Cow<'static, str>, Cow<'static, str>), - (Cow::from("a"), Cow::from("b")) - ); - - // Iterator - let v: [usize; 5] = [0, 1, 2, 3, 4]; - println!("Serialize:\t{:?}", v); - let p = ZBytes::from_iter(v.iter()); - println!("Deserialize:\t{:?}\n", p); - for (i, t) in p.iter::().enumerate() { - assert_eq!(i, t); - } - - let mut v = vec![[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]; - println!("Serialize:\t{:?}", v); - let p = ZBytes::from_iter(v.drain(..)); - println!("Deserialize:\t{:?}\n", p); - let mut iter = p.iter::<[u8; 4]>(); - assert_eq!(iter.next().unwrap(), [0, 1, 2, 3]); - assert_eq!(iter.next().unwrap(), [4, 5, 6, 7]); - assert_eq!(iter.next().unwrap(), [8, 9, 10, 11]); - assert_eq!(iter.next().unwrap(), [12, 13, 14, 15]); - assert!(iter.next().is_none()); - - use std::collections::HashMap; - let mut hm: HashMap = HashMap::new(); - hm.insert(0, 0); - hm.insert(1, 1); - println!("Serialize:\t{:?}", hm); - let p = ZBytes::from_iter(hm.clone().drain()); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, usize)>()); - assert_eq!(hm, o); - - let mut hm: HashMap> = HashMap::new(); - hm.insert(0, vec![0u8; 8]); - hm.insert(1, vec![1u8; 16]); - println!("Serialize:\t{:?}", hm); - let p = ZBytes::from_iter(hm.clone().drain()); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, Vec)>()); - assert_eq!(hm, o); - - let mut hm: HashMap> = HashMap::new(); - hm.insert(0, vec![0u8; 8]); - hm.insert(1, vec![1u8; 16]); - println!("Serialize:\t{:?}", hm); - let p = ZBytes::from_iter(hm.clone().drain()); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, Vec)>()); - assert_eq!(hm, o); - - let mut hm: HashMap = HashMap::new(); - hm.insert(0, ZSlice::from(vec![0u8; 8])); - hm.insert(1, ZSlice::from(vec![1u8; 16])); - println!("Serialize:\t{:?}", hm); - let p = ZBytes::from_iter(hm.clone().drain()); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, ZSlice)>()); - assert_eq!(hm, o); - - let mut hm: HashMap = HashMap::new(); - 
hm.insert(0, ZBuf::from(vec![0u8; 8])); - hm.insert(1, ZBuf::from(vec![1u8; 16])); - println!("Serialize:\t{:?}", hm); - let p = ZBytes::from_iter(hm.clone().drain()); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, ZBuf)>()); - assert_eq!(hm, o); - - let mut hm: HashMap> = HashMap::new(); - hm.insert(0, vec![0u8; 8]); - hm.insert(1, vec![1u8; 16]); - println!("Serialize:\t{:?}", hm); - let p = ZBytes::from_iter(hm.clone().iter().map(|(k, v)| (k, Cow::from(v)))); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, Vec)>()); - assert_eq!(hm, o); - - let mut hm: HashMap = HashMap::new(); - hm.insert(String::from("0"), String::from("a")); - hm.insert(String::from("1"), String::from("b")); - println!("Serialize:\t{:?}", hm); - let p = ZBytes::from_iter(hm.iter()); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(String, String)>()); - assert_eq!(hm, o); - - let mut hm: HashMap, Cow<'static, str>> = HashMap::new(); - hm.insert(Cow::from("0"), Cow::from("a")); - hm.insert(Cow::from("1"), Cow::from("b")); - println!("Serialize:\t{:?}", hm); - let p = ZBytes::from_iter(hm.iter()); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(Cow<'static, str>, Cow<'static, str>)>()); - assert_eq!(hm, o); - } -} From a724fa8500e4e3aa7d690d40f01cb6a8ab416c1a Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Fri, 26 Apr 2024 15:13:08 +0200 Subject: [PATCH 301/357] restored missed typedef usage --- zenoh/src/api/session.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 4fc0df5c1a..01fc345c3b 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -14,7 +14,8 @@ use super::{ admin, builders::publication::{ - PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, PublisherBuilder, + PublicationBuilderDelete, PublicationBuilderPut, PublisherBuilder, SessionDeleteBuilder, + SessionPutBuilder, }, bytes::ZBytes, encoding::Encoding, @@ -712,13 +713,13 @@ impl Session { &'a self, key_expr: TryIntoKeyExpr, payload: IntoZBytes, - ) -> PublicationBuilder, PublicationBuilderPut> + ) -> SessionPutBuilder<'a, 'b> where TryIntoKeyExpr: TryInto>, >>::Error: Into, IntoZBytes: Into, { - PublicationBuilder { + SessionPutBuilder { publisher: self.declare_publisher(key_expr), kind: PublicationBuilderPut { payload: payload.into(), @@ -752,12 +753,12 @@ impl Session { pub fn delete<'a, 'b: 'a, TryIntoKeyExpr>( &'a self, key_expr: TryIntoKeyExpr, - ) -> PublicationBuilder, PublicationBuilderDelete> + ) -> SessionDeleteBuilder<'a, 'b> where TryIntoKeyExpr: TryInto>, >>::Error: Into, { - PublicationBuilder { + SessionDeleteBuilder { publisher: self.declare_publisher(key_expr), kind: PublicationBuilderDelete, timestamp: None, From 0983d58ef38128566f9b18b26046b09c73d1ab3c Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 26 Apr 2024 16:26:11 +0200 Subject: [PATCH 302/357] Improve SHM examples --- examples/examples/z_pub_shm.rs | 5 ++-- examples/examples/z_sub_shm.rs | 50 +++++++++++++++++----------------- 2 files changed, 28 insertions(+), 27 deletions(-) diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index 8287509f1b..0dce88b8e7 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -25,7 +25,6 @@ use zenoh::shm::provider::types::MemoryLayout; use zenoh_examples::CommonArgs; const N: usize = 10; -const K: u32 = 3; #[tokio::main] 
async fn main() -> Result<(), zenoh::Error> { @@ -81,7 +80,9 @@ async fn main() -> Result<(), zenoh::Error> { .unwrap(); println!("Press CTRL-C to quit..."); - for idx in 0..(K * N as u32) { + for idx in 0..u32::MAX { + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + let mut sbuf = layout .alloc() .with_policy::>() diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index 35fb80d833..319d8ecf90 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -36,39 +36,39 @@ async fn main() { let subscriber = session.declare_subscriber(&key_expr).res().await.unwrap(); println!("Press CTRL-C to quit..."); - while let Ok(sample) = subscriber.recv_async().await { - match sample.payload().deserialize::<&zsliceshm>() { - Ok(payload) => println!( - ">> [Subscriber] Received {} ('{}': '{:02x?}')", - sample.kind(), - sample.key_expr().as_str(), - payload - ), - Err(e) => { - println!(">> [Subscriber] Not a SharedMemoryBuf: {:?}", e); - } - } - } - - // // Try to get a mutable reference to the SHM buffer. If this subscriber is the only subscriber - // // holding a reference to the SHM buffer, then it will be able to get a mutable reference to it. - // // With the mutable reference at hand, it's possible to mutate in place the SHM buffer content. - // - // use zenoh::shm::slice::zsliceshmmut::zsliceshmmut; - - // while let Ok(mut sample) = subscriber.recv_async().await { - // let kind = sample.kind(); - // let key_expr = sample.key_expr().to_string(); - // match sample.payload_mut().deserialize_mut::<&mut zsliceshmmut>() { + // while let Ok(sample) = subscriber.recv_async().await { + // match sample.payload().deserialize::<&zsliceshm>() { // Ok(payload) => println!( // ">> [Subscriber] Received {} ('{}': '{:02x?}')", - // kind, key_expr, payload + // sample.kind(), + // sample.key_expr().as_str(), + // payload // ), // Err(e) => { // println!(">> [Subscriber] Not a SharedMemoryBuf: {:?}", e); // } // } // } + + // // Try to get a mutable reference to the SHM buffer. If this subscriber is the only subscriber + // // holding a reference to the SHM buffer, then it will be able to get a mutable reference to it. + // // With the mutable reference at hand, it's possible to mutate in place the SHM buffer content. 
+ // + use zenoh::shm::slice::zsliceshmmut::zsliceshmmut; + + while let Ok(mut sample) = subscriber.recv_async().await { + let kind = sample.kind(); + let key_expr = sample.key_expr().to_string(); + match sample.payload_mut().deserialize_mut::<&mut zsliceshmmut>() { + Ok(payload) => println!( + ">> [Subscriber] Received {} ('{}': '{:02x?}')", + kind, key_expr, payload + ), + Err(e) => { + println!(">> [Subscriber] Not a SharedMemoryBuf: {:?}", e); + } + } + } } #[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] From f08ea12c2760d42c122a228f565c9463b7df0ccb Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 26 Apr 2024 16:36:32 +0200 Subject: [PATCH 303/357] Fix merge --- examples/examples/z_sub_shm.rs | 51 +++++++++++++++++----------------- 1 file changed, 26 insertions(+), 25 deletions(-) diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index fdb3204ac9..45180f598b 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -14,6 +14,7 @@ use clap::Parser; use zenoh::config::Config; use zenoh::prelude::r#async::*; +use zenoh::shm::zsliceshm; use zenoh_examples::CommonArgs; #[tokio::main] @@ -35,39 +36,39 @@ async fn main() { let subscriber = session.declare_subscriber(&key_expr).res().await.unwrap(); println!("Press CTRL-C to quit..."); - // while let Ok(sample) = subscriber.recv_async().await { - // match sample.payload().deserialize::<&zsliceshm>() { - // Ok(payload) => println!( - // ">> [Subscriber] Received {} ('{}': '{:02x?}')", - // sample.kind(), - // sample.key_expr().as_str(), - // payload - // ), - // Err(e) => { - // println!(">> [Subscriber] Not a SharedMemoryBuf: {:?}", e); - // } - // } - // } - - // // Try to get a mutable reference to the SHM buffer. If this subscriber is the only subscriber - // // holding a reference to the SHM buffer, then it will be able to get a mutable reference to it. - // // With the mutable reference at hand, it's possible to mutate in place the SHM buffer content. - // - use zenoh::shm::slice::zsliceshmmut::zsliceshmmut; - - while let Ok(mut sample) = subscriber.recv_async().await { - let kind = sample.kind(); - let key_expr = sample.key_expr().to_string(); - match sample.payload_mut().deserialize_mut::<&mut zsliceshmmut>() { + while let Ok(sample) = subscriber.recv_async().await { + match sample.payload().deserialize::<&zsliceshm>() { Ok(payload) => println!( ">> [Subscriber] Received {} ('{}': '{:02x?}')", - kind, key_expr, payload + sample.kind(), + sample.key_expr().as_str(), + payload ), Err(e) => { println!(">> [Subscriber] Not a SharedMemoryBuf: {:?}", e); } } } + + // // Try to get a mutable reference to the SHM buffer. If this subscriber is the only subscriber + // // holding a reference to the SHM buffer, then it will be able to get a mutable reference to it. + // // With the mutable reference at hand, it's possible to mutate in place the SHM buffer content. 
+ // + // use zenoh::shm::zsliceshmmut; + + // while let Ok(mut sample) = subscriber.recv_async().await { + // let kind = sample.kind(); + // let key_expr = sample.key_expr().to_string(); + // match sample.payload_mut().deserialize_mut::<&mut zsliceshmmut>() { + // Ok(payload) => println!( + // ">> [Subscriber] Received {} ('{}': '{:02x?}')", + // kind, key_expr, payload + // ), + // Err(e) => { + // println!(">> [Subscriber] Not a SharedMemoryBuf: {:?}", e); + // } + // } + // } } #[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] From 12376c0d968b60bc49da5643ca3ac6c470edb1ad Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Fri, 26 Apr 2024 18:03:09 +0200 Subject: [PATCH 304/357] Query/Reply shared memory examples --- examples/Cargo.toml | 10 ++ examples/examples/z_get.rs | 2 +- examples/examples/z_get_shm.rs | 158 +++++++++++++++++++++++++++ examples/examples/z_queryable.rs | 7 +- examples/examples/z_queryable_shm.rs | 134 +++++++++++++++++++++++ examples/examples/z_sub_shm.rs | 17 ++- zenoh/src/api/query.rs | 5 + zenoh/src/api/queryable.rs | 37 +++++-- zenoh/src/api/session.rs | 12 +- zenoh/src/net/runtime/adminspace.rs | 6 +- 10 files changed, 357 insertions(+), 31 deletions(-) create mode 100644 examples/examples/z_get_shm.rs create mode 100644 examples/examples/z_queryable_shm.rs diff --git a/examples/Cargo.toml b/examples/Cargo.toml index e117507ae9..ce268572a6 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -100,6 +100,11 @@ path = "examples/z_pull.rs" name = "z_queryable" path = "examples/z_queryable.rs" +[[example]] +name = "z_queryable_shm" +path = "examples/z_queryable_shm.rs" +required-features = ["unstable", "shared-memory"] + [[example]] name = "z_storage" path = "examples/z_storage.rs" @@ -108,6 +113,11 @@ path = "examples/z_storage.rs" name = "z_get" path = "examples/z_get.rs" +[[example]] +name = "z_get_shm" +path = "examples/z_get_shm.rs" +required-features = ["unstable", "shared-memory"] + [[example]] name = "z_forward" path = "examples/z_forward.rs" diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 56693d9fa1..76add34286 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -32,7 +32,7 @@ async fn main() { // // By default get receives replies from a FIFO. // // Uncomment this line to use a ring channel instead. // // More information on the ring channel are available in the z_pull example. - .with(zenoh::handlers::RingChannel::default()) + // .with(zenoh::handlers::RingChannel::default()) .value(value) .target(target) .timeout(timeout) diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs new file mode 100644 index 0000000000..c5f766f0f2 --- /dev/null +++ b/examples/examples/z_get_shm.rs @@ -0,0 +1,158 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use clap::Parser; +use std::time::Duration; +use zenoh::prelude::r#async::*; +use zenoh_examples::CommonArgs; + +const N: usize = 10; + +#[tokio::main] +async fn main() { + // initiate logging + zenoh_util::try_init_log_from_env(); + + let (mut config, selector, mut value, target, timeout) = parse_args(); + + // A probing procedure for shared memory is performed upon session opening. To enable `z_pub_shm` to operate + // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the + // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. + config.transport.shared_memory.set_enabled(true).unwrap(); + + println!("Opening session..."); + let session = zenoh::open(config).res().await.unwrap(); + + println!("Creating POSIX SHM backend..."); + // Construct an SHM backend + let backend = { + // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. + // The initialisation of SHM backend is completely backend-specific and user is free to do + // anything reasonable here. This code is execuated at the provider's first use + + // Alignment for POSIX SHM provider + // All allocations will be aligned corresponding to this alignment - + // that means that the provider will be able to satisfy allocation layouts + // with alignment <= provider_alignment + let provider_alignment = AllocAlignment::default(); + + // Create layout for POSIX Provider's memory + let provider_layout = MemoryLayout::new(N * 1024, provider_alignment).unwrap(); + + PosixSharedMemoryProviderBackend::builder() + .with_layout(provider_layout) + .res() + .unwrap() + }; + + println!("Creating SHM Provider with POSIX backend..."); + // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID + let shared_memory_provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + println!("Allocating Shared Memory Buffer..."); + let layout = shared_memory_provider + .alloc_layout() + .size(1024) + .res() + .unwrap(); + + let mut sbuf = layout + .alloc() + .with_policy::>() + .res_async() + .await + .unwrap(); + + let content = value + .take() + .unwrap_or_else(|| "Get from SharedMemory Rust!".to_string()); + sbuf[0..content.len()].copy_from_slice(content.as_bytes()); + + println!("Sending Query '{selector}'..."); + let replies = session + .get(&selector) + .value(sbuf) + .target(target) + .timeout(timeout) + .res() + .await + .unwrap(); + + while let Ok(reply) = replies.recv_async().await { + match reply.result() { + Ok(sample) => { + print!(">> Received ('{}': ", sample.key_expr().as_str()); + match sample.payload().deserialize::<&zsliceshm>() { + Ok(payload) => println!("'{}')", String::from_utf8_lossy(payload),), + Err(e) => println!("'Not a SharedMemoryBuf: {:?}')", e), + } + } + Err(err) => { + let payload = err + .payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)); + println!(">> Received (ERROR: '{}')", payload); + } + } + } +} + +#[derive(clap::ValueEnum, Clone, Copy, Debug)] +#[value(rename_all = "SCREAMING_SNAKE_CASE")] +enum Qt { + BestMatching, + All, + AllComplete, +} + +#[derive(Parser, Clone, Debug)] +struct Args { + #[arg(short, long, default_value = "demo/example/**")] + /// The selection of resources to query + selector: Selector<'static>, + /// The value to publish. 
+ value: Option, + #[arg(short, long, default_value = "BEST_MATCHING")] + /// The target queryables of the query. + target: Qt, + #[arg(short = 'o', long, default_value = "10000")] + /// The query timeout in milliseconds. + timeout: u64, + #[command(flatten)] + common: CommonArgs, +} + +fn parse_args() -> ( + Config, + Selector<'static>, + Option, + QueryTarget, + Duration, +) { + let args = Args::parse(); + ( + args.common.into(), + args.selector, + args.value, + match args.target { + Qt::BestMatching => QueryTarget::BestMatching, + Qt::All => QueryTarget::All, + Qt::AllComplete => QueryTarget::AllComplete, + }, + Duration::from_millis(args.timeout), + ) +} diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index 47f70c30c3..8407f9f66f 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -20,7 +20,12 @@ async fn main() { // initiate logging zenoh_util::try_init_log_from_env(); - let (config, key_expr, value, complete) = parse_args(); + let (mut config, key_expr, value, complete) = parse_args(); + + // A probing procedure for shared memory is performed upon session opening. To enable `z_get_shm` to operate + // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the + // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. + config.transport.shared_memory.set_enabled(true).unwrap(); println!("Opening session..."); let session = zenoh::open(config).res().await.unwrap(); diff --git a/examples/examples/z_queryable_shm.rs b/examples/examples/z_queryable_shm.rs new file mode 100644 index 0000000000..f689e15b51 --- /dev/null +++ b/examples/examples/z_queryable_shm.rs @@ -0,0 +1,134 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use clap::Parser; +use zenoh::prelude::r#async::*; +use zenoh_examples::CommonArgs; + +const N: usize = 10; + +#[tokio::main] +async fn main() { + // initiate logging + zenoh_util::try_init_log_from_env(); + + let (mut config, key_expr, value, complete) = parse_args(); + + // A probing procedure for shared memory is performed upon session opening. To enable `z_get_shm` to operate + // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the + // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. + config.transport.shared_memory.set_enabled(true).unwrap(); + + println!("Opening session..."); + let session = zenoh::open(config).res().await.unwrap(); + + println!("Creating POSIX SHM backend..."); + // Construct an SHM backend + let backend = { + // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. + // The initialisation of SHM backend is completely backend-specific and user is free to do + // anything reasonable here. 
This code is execuated at the provider's first use + + // Alignment for POSIX SHM provider + // All allocations will be aligned corresponding to this alignment - + // that means that the provider will be able to satisfy allocation layouts + // with alignment <= provider_alignment + let provider_alignment = AllocAlignment::default(); + + // Create layout for POSIX Provider's memory + let provider_layout = MemoryLayout::new(N * 1024, provider_alignment).unwrap(); + + PosixSharedMemoryProviderBackend::builder() + .with_layout(provider_layout) + .res() + .unwrap() + }; + + println!("Creating SHM Provider with POSIX backend..."); + // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID + let shared_memory_provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + println!("Declaring Queryable on '{key_expr}'..."); + let queryable = session + .declare_queryable(&key_expr) + .complete(complete) + .res() + .await + .unwrap(); + + println!("Press CTRL-C to quit..."); + while let Ok(query) = queryable.recv_async().await { + print!( + ">> [Queryable] Received Query '{}' ('{}'", + query.selector(), + query.key_expr().as_str(), + ); + if let Some(payload) = query.payload() { + match payload.deserialize::<&zsliceshm>() { + Ok(payload) => print!(": '{}'", String::from_utf8_lossy(payload)), + Err(e) => print!(": 'Not a SharedMemoryBuf: {:?}'", e), + } + } + println!(")"); + + println!("Allocating Shared Memory Buffer..."); + let layout = shared_memory_provider + .alloc_layout() + .size(1024) + .res() + .unwrap(); + + let mut sbuf = layout + .alloc() + .with_policy::>() + .res_async() + .await + .unwrap(); + + sbuf[0..value.len()].copy_from_slice(value.as_bytes()); + + println!( + ">> [Queryable] Responding ('{}': '{}')", + key_expr.as_str(), + value, + ); + query + .reply(key_expr.clone(), sbuf) + .res() + .await + .unwrap_or_else(|e| println!(">> [Queryable ] Error sending reply: {e}")); + } +} + +#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] +struct Args { + #[arg(short, long, default_value = "demo/example/zenoh-rs-queryable")] + /// The key expression matching queries to reply to. + key: KeyExpr<'static>, + #[arg(short, long, default_value = "Queryable from SharedMemory Rust!")] + /// The value to reply to queries. + value: String, + #[arg(long)] + /// Declare the queryable as complete w.r.t. the key expression. + complete: bool, + #[command(flatten)] + common: CommonArgs, +} + +fn parse_args() -> (Config, KeyExpr<'static>, String, bool) { + let args = Args::parse(); + (args.common.into(), args.key, args.value, args.complete) +} diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index 45180f598b..2e0f5bf910 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -37,17 +37,16 @@ async fn main() { println!("Press CTRL-C to quit..."); while let Ok(sample) = subscriber.recv_async().await { + print!( + ">> [Subscriber] Received {} ('{}': ", + sample.kind(), + sample.key_expr().as_str(), + ); match sample.payload().deserialize::<&zsliceshm>() { - Ok(payload) => println!( - ">> [Subscriber] Received {} ('{}': '{:02x?}')", - sample.kind(), - sample.key_expr().as_str(), - payload - ), - Err(e) => { - println!(">> [Subscriber] Not a SharedMemoryBuf: {:?}", e); - } + Ok(payload) => print!("'{}'", String::from_utf8_lossy(payload)), + Err(e) => print!("'Not a SharedMemoryBuf: {:?}'", e), } + println!(")"); } // // Try to get a mutable reference to the SHM buffer. 
If this subscriber is the only subscriber diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 1cb4078ee6..d95a1bd417 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -88,6 +88,11 @@ impl Reply { self.result.as_ref() } + /// Gets the a mutable borrowed result of this `Reply`. Use [`Reply::into_result`] to take ownership of the result. + pub fn result_mut(&mut self) -> Result<&mut Sample, &mut Value> { + self.result.as_mut() + } + /// Converts this `Reply` into the its result. Use [`Reply::result`] it you don't want to take ownership. pub fn into_result(self) -> Result { self.result diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index dc13468181..53fea80b10 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -50,18 +50,11 @@ use { }; pub(crate) struct QueryInner { - /// The key expression of this Query. pub(crate) key_expr: KeyExpr<'static>, - /// This Query's selector parameters. pub(crate) parameters: Parameters<'static>, - /// This Query's body. - pub(crate) value: Option, - pub(crate) qid: RequestId, pub(crate) zid: ZenohId, pub(crate) primitives: Arc, - #[cfg(feature = "unstable")] - pub(crate) attachment: Option, } impl Drop for QueryInner { @@ -79,6 +72,9 @@ impl Drop for QueryInner { pub struct Query { pub(crate) inner: Arc, pub(crate) eid: EntityId, + pub(crate) value: Option, + #[cfg(feature = "unstable")] + pub(crate) attachment: Option, } impl Query { @@ -106,24 +102,43 @@ impl Query { /// This Query's value. #[inline(always)] pub fn value(&self) -> Option<&Value> { - self.inner.value.as_ref() + self.value.as_ref() + } + + /// This Query's value. + #[inline(always)] + pub fn value_mut(&mut self) -> Option<&mut Value> { + self.value.as_mut() } /// This Query's payload. #[inline(always)] pub fn payload(&self) -> Option<&ZBytes> { - self.inner.value.as_ref().map(|v| &v.payload) + self.value.as_ref().map(|v| &v.payload) + } + + /// This Query's payload. + #[inline(always)] + pub fn payload_mut(&mut self) -> Option<&mut ZBytes> { + self.value.as_mut().map(|v| &mut v.payload) } /// This Query's encoding. #[inline(always)] pub fn encoding(&self) -> Option<&Encoding> { - self.inner.value.as_ref().map(|v| &v.encoding) + self.value.as_ref().map(|v| &v.encoding) } + /// This Query's attachment. #[zenoh_macros::unstable] pub fn attachment(&self) -> Option<&ZBytes> { - self.inner.attachment.as_ref() + self.attachment.as_ref() + } + + /// This Query's attachment. + #[zenoh_macros::unstable] + pub fn attachment_mut(&mut self) -> Option<&mut ZBytes> { + self.attachment.as_mut() } /// Sends a reply in the form of [`Sample`] to this Query. 
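Note on the queryable refactor above: moving `value` and `attachment` out of the shared `Arc<QueryInner>` and into `Query` itself is what makes the new `value_mut()`, `payload_mut()` and `attachment_mut()` accessors possible, since each callback now receives its own copy and can hand out mutable borrows. A minimal sketch of what this enables on the queryable side, reusing the `zsliceshmmut` deserializer shown in z_sub_shm.rs; the `queryable` handle and the in-place mutation are illustrative only and are not part of this patch:

    use zenoh::shm::zsliceshmmut;

    // Illustrative only: `queryable` is assumed to be declared as in z_queryable_shm.rs
    while let Ok(mut query) = queryable.recv_async().await {
        // `payload_mut()` is available because the payload now lives in `Query`,
        // not behind the shared `Arc<QueryInner>`
        if let Some(payload) = query.payload_mut() {
            if let Ok(shm) = payload.deserialize_mut::<&mut zsliceshmmut>() {
                // Mutate the shared-memory buffer in place,
                // as z_sub_shm.rs does for received samples
                shm[0..1].fill(0);
            }
        }
    }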
diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 01fc345c3b..eb70129e55 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -1806,10 +1806,6 @@ impl Session { let query_inner = Arc::new(QueryInner { key_expr, parameters: parameters.to_owned().into(), - value: body.map(|b| Value { - payload: b.payload.into(), - encoding: b.encoding.into(), - }), qid, zid, primitives: if local { @@ -1817,13 +1813,17 @@ impl Session { } else { primitives }, - #[cfg(feature = "unstable")] - attachment, }); for (eid, callback) in queryables { callback(Query { inner: query_inner.clone(), eid, + value: body.as_ref().map(|b| Value { + payload: b.payload.clone().into(), + encoding: b.encoding.clone().into(), + }), + #[cfg(feature = "unstable")] + attachment: attachment.clone(), }); } } diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index ea084c453b..c13e64f71f 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -462,14 +462,14 @@ impl Primitives for AdminSpace { inner: Arc::new(QueryInner { key_expr: key_expr.clone(), parameters: query.parameters.into(), - value: query.ext_body.map(|b| Value::new(b.payload, b.encoding)), qid: msg.id, zid, primitives, - #[cfg(feature = "unstable")] - attachment: query.ext_attachment.map(Into::into), }), eid: self.queryable_id, + value: query.ext_body.map(|b| Value::new(b.payload, b.encoding)), + #[cfg(feature = "unstable")] + attachment: query.ext_attachment.map(Into::into), }; for (key, handler) in &self.handlers { From 2dbc20f1f1010ee0e6cdf4fe457ea0ac7c07f9f0 Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Sat, 27 Apr 2024 11:57:33 +0300 Subject: [PATCH 305/357] rename payload tests to bytes tests --- zenoh/tests/{payload.rs => bytes.rs} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename zenoh/tests/{payload.rs => bytes.rs} (98%) diff --git a/zenoh/tests/payload.rs b/zenoh/tests/bytes.rs similarity index 98% rename from zenoh/tests/payload.rs rename to zenoh/tests/bytes.rs index 44daadf18c..41e6d14c6e 100644 --- a/zenoh/tests/payload.rs +++ b/zenoh/tests/bytes.rs @@ -14,7 +14,7 @@ #[test] #[cfg(all(feature = "shared-memory", feature = "unstable"))] -fn shm_payload_single_buf() { +fn shm_bytes_single_buf() { use zenoh::prelude::r#async::*; // create an SHM backend... 
From 6f8f6b745b062cb0b66123e36b3125d2a5a2c780 Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Sat, 27 Apr 2024 12:23:22 +0300 Subject: [PATCH 306/357] - fix API exports - fix z_payload_shm example --- examples/examples/z_payload_shm.rs | 9 ++------- zenoh/src/lib.rs | 1 + 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/examples/examples/z_payload_shm.rs b/examples/examples/z_payload_shm.rs index 3b03b80502..4bf45381de 100644 --- a/examples/examples/z_payload_shm.rs +++ b/examples/examples/z_payload_shm.rs @@ -11,16 +11,11 @@ // Contributors: // ZettaScale Zenoh Team, // -use zenoh::shm::slice::zsliceshm::{zsliceshm, ZSliceShm}; -use zenoh::shm::slice::zsliceshmmut::{zsliceshmmut, ZSliceShmMut}; use zenoh::{ bytes::ZBytes, shm::{ - protocol_implementations::posix::{ - posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, - protocol_id::POSIX_PROTOCOL_ID, - }, - provider::shared_memory_provider::SharedMemoryProviderBuilder, + zsliceshm, zsliceshmmut, PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, + ZSliceShm, ZSliceShmMut, POSIX_PROTOCOL_ID, }, }; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 3c011e2439..2a238ea875 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -371,6 +371,7 @@ pub mod shm { pub use zenoh_shm::api::slice::zsliceshmmut::{zsliceshmmut, ZSliceShmMut}; pub use zenoh_shm::api::{ protocol_implementations::posix::{ + posix_shared_memory_client::PosixSharedMemoryClient, posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, protocol_id::POSIX_PROTOCOL_ID, }, From fcb0545a8fff945d12d80140e8f5d7e91053d4f4 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Mon, 29 Apr 2024 16:40:20 +0200 Subject: [PATCH 307/357] refactor: replace `AsyncResolve` with `IntoFuture` (#942) * refactor: replace `AsyncResolve` with `IntoFuture`, `SyncResolve` with `Wait`, and deprecate old API * fix: fix shared memory * fix: fix remaining test * fix: fix remaining test * Update examples/examples/z_get.rs Co-authored-by: Luca Cominardi * fix: put back (A)SyncResolve in prelude --------- Co-authored-by: Luca Cominardi --- Cargo.lock | 2 +- .../src/pub_sub/bin/z_pub_sub.rs | 15 +- .../src/queryable_get/bin/z_queryable_get.rs | 9 +- commons/zenoh-core/src/lib.rs | 107 +++++++-- commons/zenoh-core/src/macros.rs | 4 +- commons/zenoh-task/src/lib.rs | 6 +- examples/examples/z_alloc_shm.rs | 8 +- examples/examples/z_delete.rs | 8 +- examples/examples/z_forward.rs | 8 +- examples/examples/z_get.rs | 11 +- examples/examples/z_get_liveliness.rs | 5 +- examples/examples/z_info.rs | 10 +- examples/examples/z_liveliness.rs | 15 +- examples/examples/z_ping.rs | 12 +- examples/examples/z_ping_shm.rs | 12 +- examples/examples/z_pong.rs | 10 +- examples/examples/z_pub.rs | 13 +- examples/examples/z_pub_shm.rs | 8 +- examples/examples/z_pub_shm_thr.rs | 15 +- examples/examples/z_pub_thr.rs | 8 +- examples/examples/z_pull.rs | 5 +- examples/examples/z_put.rs | 6 +- examples/examples/z_put_float.rs | 8 +- examples/examples/z_queryable.rs | 10 +- examples/examples/z_scout.rs | 3 +- examples/examples/z_storage.rs | 9 +- examples/examples/z_sub.rs | 6 +- examples/examples/z_sub_liveliness.rs | 5 +- examples/examples/z_sub_shm.rs | 6 +- examples/examples/z_sub_thr.rs | 6 +- .../zenoh-link-unixpipe/src/unix/unicast.rs | 4 +- plugins/zenoh-backend-traits/src/lib.rs | 2 +- plugins/zenoh-plugin-example/src/lib.rs | 9 +- .../zenoh-plugin-rest/examples/z_serve_sse.rs | 11 +- plugins/zenoh-plugin-rest/src/lib.rs | 22 +- 
.../zenoh-plugin-storage-manager/src/lib.rs | 5 +- .../src/replica/align_queryable.rs | 9 +- .../src/replica/aligner.rs | 3 +- .../src/replica/mod.rs | 12 +- .../src/replica/storage.rs | 7 +- .../tests/operations.rs | 16 +- .../tests/wildcard.rs | 16 +- rust-toolchain.toml | 2 +- zenoh-ext/examples/examples/z_member.rs | 4 +- zenoh-ext/examples/examples/z_pub_cache.rs | 8 +- zenoh-ext/examples/examples/z_query_sub.rs | 6 +- zenoh-ext/examples/examples/z_view_size.rs | 3 +- zenoh-ext/src/group.rs | 14 +- zenoh-ext/src/publication_cache.rs | 30 +-- zenoh-ext/src/querying_subscriber.rs | 84 ++++--- zenoh-ext/src/session_ext.rs | 6 +- zenoh-ext/src/subscriber_ext.rs | 36 ++- zenoh/src/api/admin.rs | 6 +- zenoh/src/api/builders/publication.rs | 84 +++---- zenoh/src/api/info.rs | 87 +++---- zenoh/src/api/key_expr.rs | 24 +- zenoh/src/api/liveliness.rs | 143 ++++++------ zenoh/src/api/publication.rs | 221 +++++++++--------- zenoh/src/api/query.rs | 36 ++- zenoh/src/api/queryable.rs | 128 +++++----- zenoh/src/api/scouting.rs | 43 ++-- zenoh/src/api/session.rs | 164 ++++++------- zenoh/src/api/subscriber.rs | 85 ++++--- zenoh/src/lib.rs | 23 +- zenoh/src/net/runtime/adminspace.rs | 20 +- zenoh/src/prelude.rs | 18 +- zenoh/tests/acl.rs | 141 ++++++----- zenoh/tests/attachments.rs | 22 +- zenoh/tests/connection_retry.rs | 6 +- zenoh/tests/events.rs | 45 ++-- zenoh/tests/handler.rs | 16 +- zenoh/tests/interceptors.rs | 36 +-- zenoh/tests/liveliness.rs | 25 +- zenoh/tests/matching.rs | 99 ++++---- zenoh/tests/payload.rs | 2 +- zenoh/tests/qos.rs | 18 +- zenoh/tests/routing.rs | 19 +- zenoh/tests/session.rs | 112 +++++---- zenoh/tests/shm.rs | 20 +- zenoh/tests/unicity.rs | 111 ++++----- zenohd/src/main.rs | 3 +- 81 files changed, 1148 insertions(+), 1268 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 55de3d50f9..db32920bdb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4436,7 +4436,7 @@ version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 0.1.10", "static_assertions", ] diff --git a/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs b/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs index d44215cac5..2091f833a1 100644 --- a/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs +++ b/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs @@ -13,7 +13,7 @@ // use std::time::Duration; use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; #[tokio::main] async fn main() { @@ -23,15 +23,11 @@ async fn main() { let sub_key_expr = KeyExpr::try_from("test/valgrind/**").unwrap(); println!("Declaring Publisher on '{pub_key_expr}'..."); - let pub_session = zenoh::open(Config::default()).res().await.unwrap(); - let publisher = pub_session - .declare_publisher(&pub_key_expr) - .res() - .await - .unwrap(); + let pub_session = zenoh::open(Config::default()).await.unwrap(); + let publisher = pub_session.declare_publisher(&pub_key_expr).await.unwrap(); println!("Declaring Subscriber on '{sub_key_expr}'..."); - let sub_session = zenoh::open(Config::default()).res().await.unwrap(); + let sub_session = zenoh::open(Config::default()).await.unwrap(); let _subscriber = sub_session .declare_subscriber(&sub_key_expr) .callback(|sample| { @@ -45,7 +41,6 @@ async fn main() { .unwrap_or_else(|e| format!("{}", e)) ); }) - .res() .await .unwrap(); @@ -53,7 +48,7 @@ async fn main() { tokio::time::sleep(Duration::from_secs(1)).await; let buf = format!("[{idx:4}] 
data"); println!("Putting Data ('{}': '{}')...", &pub_key_expr, buf); - publisher.put(buf).res().await.unwrap(); + publisher.put(buf).await.unwrap(); } tokio::time::sleep(Duration::from_secs(1)).await; diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs index 364617eb2a..43cb038f94 100644 --- a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -14,7 +14,7 @@ use std::convert::TryFrom; use std::time::Duration; use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; #[tokio::main] async fn main() { @@ -24,7 +24,7 @@ async fn main() { let get_selector = Selector::try_from("test/valgrind/**").unwrap(); println!("Declaring Queryable on '{queryable_key_expr}'..."); - let queryable_session = zenoh::open(Config::default()).res().await.unwrap(); + let queryable_session = zenoh::open(Config::default()).await.unwrap(); let _queryable = queryable_session .declare_queryable(queryable_key_expr.clone()) .callback(move |query| { @@ -33,18 +33,16 @@ async fn main() { zenoh_runtime::ZRuntime::Application.block_in_place(async move { query .reply(queryable_key_expr, query.value().unwrap().payload().clone()) - .res() .await .unwrap(); }); }) .complete(true) - .res() .await .unwrap(); println!("Declaring Get session for '{get_selector}'..."); - let get_session = zenoh::open(Config::default()).res().await.unwrap(); + let get_session = zenoh::open(Config::default()).await.unwrap(); for idx in 0..5 { tokio::time::sleep(Duration::from_secs(1)).await; @@ -53,7 +51,6 @@ async fn main() { .get(&get_selector) .value(idx) .target(QueryTarget::All) - .res() .await .unwrap(); while let Ok(reply) = replies.recv_async().await { diff --git a/commons/zenoh-core/src/lib.rs b/commons/zenoh-core/src/lib.rs index e15ff1d3bf..19cf3751ff 100644 --- a/commons/zenoh-core/src/lib.rs +++ b/commons/zenoh-core/src/lib.rs @@ -20,7 +20,7 @@ pub use lazy_static::lazy_static; pub mod macros; -use std::future::{Future, Ready}; +use std::future::{Future, IntoFuture, Ready}; // Re-exports after moving ZError/ZResult to zenoh-result pub use zenoh_result::{bail, to_zerror, zerror}; @@ -30,12 +30,34 @@ pub mod zresult { pub use zresult::Error; pub use zresult::ZResult as Result; +/// A resolvable execution, either sync or async pub trait Resolvable { type To: Sized + Send; } +/// Trick used to mark `::IntoFuture` bound as Send +#[doc(hidden)] +pub trait IntoSendFuture: Resolvable { + type IntoFuture: Future + Send; +} + +impl IntoSendFuture for T +where + T: Resolvable + IntoFuture, + T::IntoFuture: Send, +{ + type IntoFuture = T::IntoFuture; +} + +/// Synchronous execution of a resolvable +pub trait Wait: Resolvable { + /// Synchronously execute and wait + fn wait(self) -> Self::To; +} + +#[deprecated = "use `.await` directly instead"] pub trait AsyncResolve: Resolvable { - type Future: Future::To> + Send; + type Future: Future + Send; fn res_async(self) -> Self::Future; @@ -47,10 +69,24 @@ pub trait AsyncResolve: Resolvable { } } +#[allow(deprecated)] +impl AsyncResolve for T +where + T: Resolvable + IntoFuture, + T::IntoFuture: Send, +{ + type Future = T::IntoFuture; + + fn res_async(self) -> Self::Future { + self.into_future() + } +} + +#[deprecated = "use `.wait()` instead`"] pub trait SyncResolve: Resolvable { - fn res_sync(self) -> ::To; + fn res_sync(self) -> Self::To; - fn res(self) -> ::To + fn res(self) -> Self::To where Self: Sized, { @@ -58,23 +94,42 
@@ pub trait SyncResolve: Resolvable { } } +#[allow(deprecated)] +impl SyncResolve for T +where + T: Wait, +{ + fn res_sync(self) -> Self::To { + self.wait() + } +} + /// Zenoh's trait for resolving builder patterns. /// -/// Builder patterns in Zenoh can be resolved with [`AsyncResolve`] in async context and [`SyncResolve`] in sync context. -/// In both async and sync context calling `.res()` resolves the builder. -/// `.res()` maps to `.res_async()` in async context. -/// `.res()` maps to `.res_sync()` in sync context. -/// We advise to prefer the usage of [`AsyncResolve`] and to use [`SyncResolve`] with caution. -#[must_use = "Resolvables do nothing unless you resolve them using `.res()`."] -pub trait Resolve: Resolvable + SyncResolve + AsyncResolve + Send {} +/// Builder patterns in Zenoh can be resolved by awaiting them, in async context, +/// and [`Wait::wait`] in sync context. +/// We advise to prefer the usage of asynchronous execution, and to use synchronous one with caution +#[must_use = "Resolvables do nothing unless you resolve them using `.await` or synchronous `.wait()` method"] +pub trait Resolve: + Resolvable + + Wait + + IntoSendFuture + + IntoFuture::IntoFuture, Output = Output> + + Send +{ +} impl Resolve for T where - T: Resolvable + SyncResolve + AsyncResolve + Send + T: Resolvable + + Wait + + IntoSendFuture + + IntoFuture::IntoFuture, Output = Output> + + Send { } // Closure to wait -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +#[must_use = "Resolvables do nothing unless you resolve them using `.await` or synchronous `.wait()` method"] pub struct ResolveClosure(C) where To: Sized + Send, @@ -98,30 +153,31 @@ where type To = To; } -impl AsyncResolve for ResolveClosure +impl IntoFuture for ResolveClosure where To: Sized + Send, C: FnOnce() -> To + Send, { - type Future = Ready<::To>; + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } -impl SyncResolve for ResolveClosure +impl Wait for ResolveClosure where To: Sized + Send, C: FnOnce() -> To + Send, { - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { self.0() } } // Future to wait -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +#[must_use = "Resolvables do nothing unless you resolve them using `.await` or synchronous `.wait()` method"] pub struct ResolveFuture(F) where To: Sized + Send, @@ -145,24 +201,25 @@ where type To = To; } -impl AsyncResolve for ResolveFuture +impl IntoFuture for ResolveFuture where To: Sized + Send, F: Future + Send, { - type Future = F; + type Output = To; + type IntoFuture = F; - fn res_async(self) -> Self::Future { + fn into_future(self) -> Self::IntoFuture { self.0 } } -impl SyncResolve for ResolveFuture +impl Wait for ResolveFuture where To: Sized + Send, F: Future + Send, { - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { zenoh_runtime::ZRuntime::Application.block_in_place(self.0) } } diff --git a/commons/zenoh-core/src/macros.rs b/commons/zenoh-core/src/macros.rs index d8f2f1fdc3..f20f22f41a 100644 --- a/commons/zenoh-core/src/macros.rs +++ b/commons/zenoh-core/src/macros.rs @@ -233,6 +233,8 @@ macro_rules! zcondfeat { #[macro_export] macro_rules! 
ztimeout { ($f:expr) => { - tokio::time::timeout(TIMEOUT, $f).await.unwrap() + tokio::time::timeout(TIMEOUT, ::core::future::IntoFuture::into_future($f)) + .await + .unwrap() }; } diff --git a/commons/zenoh-task/src/lib.rs b/commons/zenoh-task/src/lib.rs index 5f7c3c26d2..d41eb50f34 100644 --- a/commons/zenoh-task/src/lib.rs +++ b/commons/zenoh-task/src/lib.rs @@ -24,7 +24,7 @@ use std::time::Duration; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use tokio_util::task::TaskTracker; -use zenoh_core::{ResolveFuture, SyncResolve}; +use zenoh_core::{ResolveFuture, Wait}; use zenoh_runtime::ZRuntime; #[derive(Clone)] @@ -111,7 +111,7 @@ impl TaskController { /// The call blocks until all tasks yield or timeout duration expires. /// Returns 0 in case of success, number of non terminated tasks otherwise. pub fn terminate_all(&self, timeout: Duration) -> usize { - ResolveFuture::new(async move { self.terminate_all_async(timeout).await }).res_sync() + ResolveFuture::new(async move { self.terminate_all_async(timeout).await }).wait() } /// Async version of [`TaskController::terminate_all()`]. @@ -176,7 +176,7 @@ impl TerminatableTask { /// Attempts to terminate the task. /// Returns true if task completed / aborted within timeout duration, false otherwise. pub fn terminate(self, timeout: Duration) -> bool { - ResolveFuture::new(async move { self.terminate_async(timeout).await }).res_sync() + ResolveFuture::new(async move { self.terminate_async(timeout).await }).wait() } /// Async version of [`TerminatableTask::terminate()`]. diff --git a/examples/examples/z_alloc_shm.rs b/examples/examples/z_alloc_shm.rs index 34e1c07058..acff39379c 100644 --- a/examples/examples/z_alloc_shm.rs +++ b/examples/examples/z_alloc_shm.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; #[tokio::main] async fn main() { @@ -120,9 +120,9 @@ async fn run() -> ZResult<()> { sbuf[0..8].fill(0); // Declare Session and Publisher (common code) - let session = zenoh::open(Config::default()).res_async().await?; - let publisher = session.declare_publisher("my/key/expr").res_async().await?; + let session = zenoh::open(Config::default()).await?; + let publisher = session.declare_publisher("my/key/expr").await?; // Publish SHM buffer - publisher.put(sbuf).res_async().await + publisher.put(sbuf).await } diff --git a/examples/examples/z_delete.rs b/examples/examples/z_delete.rs index 7ee8c75421..4fbb46367c 100644 --- a/examples/examples/z_delete.rs +++ b/examples/examples/z_delete.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -23,12 +23,12 @@ async fn main() { let (config, key_expr) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Deleting resources matching '{key_expr}'..."); - session.delete(&key_expr).res().await.unwrap(); + session.delete(&key_expr).await.unwrap(); - session.close().res().await.unwrap(); + session.close().await.unwrap(); } #[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] diff --git a/examples/examples/z_forward.rs b/examples/examples/z_forward.rs index 000b0f97ff..22a6ef4229 100644 --- a/examples/examples/z_forward.rs +++ b/examples/examples/z_forward.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::r#async::*; +use 
zenoh::prelude::*; use zenoh_examples::CommonArgs; use zenoh_ext::SubscriberForward; @@ -24,12 +24,12 @@ async fn main() { let (config, key_expr, forward) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring Subscriber on '{key_expr}'..."); - let mut subscriber = session.declare_subscriber(&key_expr).res().await.unwrap(); + let mut subscriber = session.declare_subscriber(&key_expr).await.unwrap(); println!("Declaring Publisher on '{forward}'..."); - let publisher = session.declare_publisher(&forward).res().await.unwrap(); + let publisher = session.declare_publisher(&forward).await.unwrap(); println!("Forwarding data from '{key_expr}' to '{forward}'..."); subscriber.forward(publisher).await.unwrap(); } diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 56693d9fa1..6b6326ebcf 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -13,7 +13,7 @@ // use clap::Parser; use std::time::Duration; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -24,19 +24,18 @@ async fn main() { let (config, selector, value, target, timeout) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Sending Query '{selector}'..."); let replies = session .get(&selector) - // // By default get receives replies from a FIFO. - // // Uncomment this line to use a ring channel instead. + // // By default get receives replies from a FIFO. + // // Uncomment this line to use a ring channel instead. // // More information on the ring channel are available in the z_pull example. 
- .with(zenoh::handlers::RingChannel::default()) + // .with(zenoh::handlers::RingChannel::default()) .value(value) .target(target) .timeout(timeout) - .res() .await .unwrap(); while let Ok(reply) = replies.recv_async().await { diff --git a/examples/examples/z_get_liveliness.rs b/examples/examples/z_get_liveliness.rs index bd8e62a78c..43747697b6 100644 --- a/examples/examples/z_get_liveliness.rs +++ b/examples/examples/z_get_liveliness.rs @@ -13,7 +13,7 @@ // use clap::Parser; use std::time::Duration; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -24,14 +24,13 @@ async fn main() { let (config, key_expr, timeout) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Sending Liveliness Query '{key_expr}'..."); let replies = session .liveliness() .get(&key_expr) .timeout(timeout) - .res() .await .unwrap(); while let Ok(reply) = replies.recv_async().await { diff --git a/examples/examples/z_info.rs b/examples/examples/z_info.rs index adde62f808..db28970897 100644 --- a/examples/examples/z_info.rs +++ b/examples/examples/z_info.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -23,17 +23,17 @@ async fn main() { let config = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); let info = session.info(); - println!("zid: {}", info.zid().res().await); + println!("zid: {}", info.zid().await); println!( "routers zid: {:?}", - info.routers_zid().res().await.collect::>() + info.routers_zid().await.collect::>() ); println!( "peers zid: {:?}", - info.peers_zid().res().await.collect::>() + info.peers_zid().await.collect::>() ); } diff --git a/examples/examples/z_liveliness.rs b/examples/examples/z_liveliness.rs index 1c78d3ad24..cee7a29376 100644 --- a/examples/examples/z_liveliness.rs +++ b/examples/examples/z_liveliness.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -23,17 +23,10 @@ async fn main() { let (config, key_expr) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring LivelinessToken on '{}'...", &key_expr); - let mut token = Some( - session - .liveliness() - .declare_token(&key_expr) - .res() - .await - .unwrap(), - ); + let mut token = Some(session.liveliness().declare_token(&key_expr).await.unwrap()); println!("Press CTRL-C to undeclare LivelinessToken and quit..."); std::thread::park(); @@ -41,7 +34,7 @@ async fn main() { // Use the code below to manually undeclare it if needed if let Some(token) = token.take() { println!("Undeclaring LivelinessToken..."); - token.undeclare().res().await.unwrap(); + token.undeclare().await.unwrap(); }; } diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index af1e9c977d..81181f1a81 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -13,7 +13,7 @@ // use clap::Parser; use std::time::{Duration, Instant}; -use zenoh::prelude::sync::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; fn main() { @@ -21,7 +21,7 @@ fn main() { zenoh_util::try_init_log_from_env(); let 
(config, warmup, size, n, express) = parse_args(); - let session = zenoh::open(config).res().unwrap(); + let session = zenoh::open(config).wait().unwrap(); // The key expression to publish data on let key_expr_ping = keyexpr::new("test/ping").unwrap(); @@ -29,12 +29,12 @@ fn main() { // The key expression to wait the response back let key_expr_pong = keyexpr::new("test/pong").unwrap(); - let sub = session.declare_subscriber(key_expr_pong).res().unwrap(); + let sub = session.declare_subscriber(key_expr_pong).wait().unwrap(); let publisher = session .declare_publisher(key_expr_ping) .congestion_control(CongestionControl::Block) .express(express) - .res() + .wait() .unwrap(); let data: ZBytes = (0usize..size) @@ -49,7 +49,7 @@ fn main() { let now = Instant::now(); while now.elapsed() < warmup { let data = data.clone(); - publisher.put(data).res().unwrap(); + publisher.put(data).wait().unwrap(); let _ = sub.recv(); } @@ -57,7 +57,7 @@ fn main() { for _ in 0..n { let data = data.clone(); let write_time = Instant::now(); - publisher.put(data).res().unwrap(); + publisher.put(data).wait().unwrap(); let _ = sub.recv(); let ts = write_time.elapsed().as_micros(); diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs index 98d9bae825..7a7bd61580 100644 --- a/examples/examples/z_ping_shm.rs +++ b/examples/examples/z_ping_shm.rs @@ -13,7 +13,7 @@ // use clap::Parser; use std::time::{Duration, Instant}; -use zenoh::prelude::sync::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; fn main() { @@ -27,7 +27,7 @@ fn main() { // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. config.transport.shared_memory.set_enabled(true).unwrap(); - let session = zenoh::open(config).res().unwrap(); + let session = zenoh::open(config).wait().unwrap(); // The key expression to publish data on let key_expr_ping = keyexpr::new("test/ping").unwrap(); @@ -35,11 +35,11 @@ fn main() { // The key expression to wait the response back let key_expr_pong = keyexpr::new("test/pong").unwrap(); - let sub = session.declare_subscriber(key_expr_pong).res().unwrap(); + let sub = session.declare_subscriber(key_expr_pong).wait().unwrap(); let publisher = session .declare_publisher(key_expr_ping) .congestion_control(CongestionControl::Block) - .res() + .wait() .unwrap(); let mut samples = Vec::with_capacity(n); @@ -87,14 +87,14 @@ fn main() { println!("Warming up for {warmup:?}..."); let now = Instant::now(); while now.elapsed() < warmup { - publisher.put(buf.clone()).res().unwrap(); + publisher.put(buf.clone()).wait().unwrap(); let _ = sub.recv().unwrap(); } for _ in 0..n { let buf = buf.clone(); let write_time = Instant::now(); - publisher.put(buf).res().unwrap(); + publisher.put(buf).wait().unwrap(); let _ = sub.recv(); let ts = write_time.elapsed().as_micros(); diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index b2fc075c10..7d7b60b6e9 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::sync::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; fn main() { @@ -26,7 +26,7 @@ fn main() { // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. 
config.transport.shared_memory.set_enabled(true).unwrap(); - let session = zenoh::open(config).res().unwrap().into_arc(); + let session = zenoh::open(config).wait().unwrap().into_arc(); // The key expression to read the data from let key_expr_ping = keyexpr::new("test/ping").unwrap(); @@ -38,13 +38,13 @@ fn main() { .declare_publisher(key_expr_pong) .congestion_control(CongestionControl::Block) .express(express) - .res() + .wait() .unwrap(); let _sub = session .declare_subscriber(key_expr_ping) - .callback(move |sample| publisher.put(sample.payload().clone()).res().unwrap()) - .res() + .callback(move |sample| publisher.put(sample.payload().clone()).wait().unwrap()) + .wait() .unwrap(); std::thread::park(); } diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 79de0e61d4..7c2c9f2c65 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -13,7 +13,7 @@ // use clap::Parser; use std::time::Duration; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -24,22 +24,17 @@ async fn main() { let (config, key_expr, value, attachment) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring Publisher on '{key_expr}'..."); - let publisher = session.declare_publisher(&key_expr).res().await.unwrap(); + let publisher = session.declare_publisher(&key_expr).await.unwrap(); println!("Press CTRL-C to quit..."); for idx in 0..u32::MAX { tokio::time::sleep(Duration::from_secs(1)).await; let buf = format!("[{idx:4}] {value}"); println!("Putting Data ('{}': '{}')...", &key_expr, buf); - publisher - .put(buf) - .attachment(&attachment) - .res() - .await - .unwrap(); + publisher.put(buf).attachment(&attachment).await.unwrap(); } } diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index 79527c3e5f..92d19b6b06 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; const N: usize = 10; @@ -31,7 +31,7 @@ async fn main() -> Result<(), ZError> { config.transport.shared_memory.set_enabled(true).unwrap(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Creating POSIX SHM backend..."); // Construct an SHM backend @@ -62,7 +62,7 @@ async fn main() -> Result<(), ZError> { .backend(backend) .res(); - let publisher = session.declare_publisher(&path).res().await.unwrap(); + let publisher = session.declare_publisher(&path).await.unwrap(); println!("Allocating Shared Memory Buffer..."); let layout = shared_memory_provider @@ -95,7 +95,7 @@ async fn main() -> Result<(), ZError> { path, String::from_utf8_lossy(&sbuf[0..slice_len]) ); - publisher.put(sbuf).res().await?; + publisher.put(sbuf).await?; } Ok(()) diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index 70a0bf0548..0b94304321 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -26,7 +26,7 @@ async fn main() { // subscriber side. 
By doing so, the probing procedure will succeed and shared memory will operate as expected. config.transport.shared_memory.set_enabled(true).unwrap(); - let z = zenoh::open(config).res().await.unwrap(); + let z = zenoh::open(config).await.unwrap(); // Construct an SHM backend let backend = { @@ -68,15 +68,18 @@ async fn main() { *b = rand::random::(); } - let publisher = z.declare_publisher("test/thr") - // Make sure to not drop messages because of congestion control - .congestion_control(CongestionControl::Block).res().await.unwrap(); + let publisher = z + .declare_publisher("test/thr") + // Make sure to not drop messages because of congestion control + .congestion_control(CongestionControl::Block) + .await + .unwrap(); let buf: ZSlice = buf.into(); println!("Press CTRL-C to quit..."); loop { - publisher.put(buf.clone()).res().await.unwrap(); + publisher.put(buf.clone()).await.unwrap(); } } diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 5625c1b91d..5eb4f9e96e 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -14,7 +14,7 @@ use clap::Parser; use std::convert::TryInto; -use zenoh::prelude::sync::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; fn main() { @@ -34,21 +34,21 @@ fn main() { .collect::>() .into(); - let session = zenoh::open(args.common).res().unwrap(); + let session = zenoh::open(args.common).wait().unwrap(); let publisher = session .declare_publisher("test/thr") .congestion_control(CongestionControl::Block) .priority(prio) .express(args.express) - .res() + .wait() .unwrap(); println!("Press CTRL-C to quit..."); let mut count: usize = 0; let mut start = std::time::Instant::now(); loop { - publisher.put(data.clone()).res().unwrap(); + publisher.put(data.clone()).wait().unwrap(); if args.print { if count < args.number { diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index 349779e574..55f211f111 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -13,7 +13,7 @@ // use clap::Parser; use std::time::Duration; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -24,13 +24,12 @@ async fn main() { let (config, key_expr, size, interval) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring Subscriber on '{key_expr}'..."); let subscriber = session .declare_subscriber(&key_expr) .with(RingChannel::new(size)) - .res() .await .unwrap(); diff --git a/examples/examples/z_put.rs b/examples/examples/z_put.rs index bb1274a638..5d68d205f9 100644 --- a/examples/examples/z_put.rs +++ b/examples/examples/z_put.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -23,10 +23,10 @@ async fn main() { let (config, key_expr, value) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Putting Data ('{key_expr}': '{value}')..."); - session.put(&key_expr, value).res().await.unwrap(); + session.put(&key_expr, value).await.unwrap(); } #[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] diff --git a/examples/examples/z_put_float.rs b/examples/examples/z_put_float.rs index b9c2a4e019..97e4abd69d 100644 --- a/examples/examples/z_put_float.rs +++ 
b/examples/examples/z_put_float.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -23,12 +23,12 @@ async fn main() { let (config, key_expr, value) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Putting Float ('{key_expr}': '{value}')..."); - session.put(&key_expr, value).res().await.unwrap(); + session.put(&key_expr, value).await.unwrap(); - session.close().res().await.unwrap(); + session.close().await.unwrap(); } #[derive(clap::Parser, Clone, PartialEq, Debug)] diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index 47f70c30c3..e24b8e80cb 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -23,17 +23,16 @@ async fn main() { let (config, key_expr, value, complete) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring Queryable on '{key_expr}'..."); let queryable = session .declare_queryable(&key_expr) - // // By default queryable receives queries from a FIFO. - // // Uncomment this line to use a ring channel instead. + // // By default queryable receives queries from a FIFO. + // // Uncomment this line to use a ring channel instead. // // More information on the ring channel are available in the z_pull example. // .with(zenoh::handlers::RingChannel::default()) .complete(complete) - .res() .await .unwrap(); @@ -60,7 +59,6 @@ async fn main() { ); query .reply(key_expr.clone(), value.clone()) - .res() .await .unwrap_or_else(|e| println!(">> [Queryable ] Error sending reply: {e}")); } diff --git a/examples/examples/z_scout.rs b/examples/examples/z_scout.rs index 5ac06f37d4..bcd65ffb0e 100644 --- a/examples/examples/z_scout.rs +++ b/examples/examples/z_scout.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; #[tokio::main] async fn main() { @@ -20,7 +20,6 @@ async fn main() { println!("Scouting..."); let receiver = scout(WhatAmI::Peer | WhatAmI::Router, Config::default()) - .res() .await .unwrap(); diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index fd4337535c..2b03e32d06 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -16,7 +16,7 @@ use clap::Parser; use futures::select; use std::collections::HashMap; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -29,16 +29,15 @@ async fn main() { let mut stored: HashMap = HashMap::new(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring Subscriber on '{key_expr}'..."); - let subscriber = session.declare_subscriber(&key_expr).res().await.unwrap(); + let subscriber = session.declare_subscriber(&key_expr).await.unwrap(); println!("Declaring Queryable on '{key_expr}'..."); let queryable = session .declare_queryable(&key_expr) .complete(complete) - .res() .await .unwrap(); @@ -60,7 +59,7 @@ async fn main() { println!(">> [Queryable ] Received Query '{}'", 
query.selector()); for (stored_name, sample) in stored.iter() { if query.selector().key_expr().intersects(unsafe {keyexpr::from_str_unchecked(stored_name)}) { - query.reply(sample.key_expr().clone(), sample.payload().clone()).res().await.unwrap(); + query.reply(sample.key_expr().clone(), sample.payload().clone()).await.unwrap(); } } } diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index d2cc370306..156968eb36 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -28,10 +28,10 @@ async fn main() { config.transport.shared_memory.set_enabled(true).unwrap(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring Subscriber on '{}'...", &key_expr); - let subscriber = session.declare_subscriber(&key_expr).res().await.unwrap(); + let subscriber = session.declare_subscriber(&key_expr).await.unwrap(); println!("Press CTRL-C to quit..."); while let Ok(sample) = subscriber.recv_async().await { diff --git a/examples/examples/z_sub_liveliness.rs b/examples/examples/z_sub_liveliness.rs index 1df5b9422e..af2c02342d 100644 --- a/examples/examples/z_sub_liveliness.rs +++ b/examples/examples/z_sub_liveliness.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -23,14 +23,13 @@ async fn main() { let (config, key_expr) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring Liveliness Subscriber on '{}'...", &key_expr); let subscriber = session .liveliness() .declare_subscriber(&key_expr) - .res() .await .unwrap(); diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index 282fd8c776..5f5c77633f 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -13,7 +13,7 @@ // use clap::Parser; use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -29,10 +29,10 @@ async fn main() { config.transport.shared_memory.set_enabled(true).unwrap(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring Subscriber on '{}'...", &key_expr); - let subscriber = session.declare_subscriber(&key_expr).res().await.unwrap(); + let subscriber = session.declare_subscriber(&key_expr).await.unwrap(); println!("Press CTRL-C to quit..."); while let Ok(sample) = subscriber.recv_async().await { diff --git a/examples/examples/z_sub_thr.rs b/examples/examples/z_sub_thr.rs index 88105ca8aa..6913a7bf08 100644 --- a/examples/examples/z_sub_thr.rs +++ b/examples/examples/z_sub_thr.rs @@ -13,7 +13,7 @@ // use clap::Parser; use std::time::Instant; -use zenoh::prelude::sync::*; +use zenoh::prelude::*; use zenoh_examples::CommonArgs; struct Stats { @@ -77,7 +77,7 @@ fn main() { // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. 
config.transport.shared_memory.set_enabled(true).unwrap(); - let session = zenoh::open(config).res().unwrap(); + let session = zenoh::open(config).wait().unwrap(); let key_expr = "test/thr"; @@ -90,7 +90,7 @@ fn main() { std::process::exit(0) } }) - .res() + .wait() .unwrap(); println!("Press CTRL-C to quit..."); diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs index 12543b31a1..ea90630523 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs @@ -32,7 +32,7 @@ use tokio::io::unix::AsyncFd; use tokio::io::Interest; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; -use zenoh_core::{zasyncread, zasyncwrite, ResolveFuture, SyncResolve}; +use zenoh_core::{zasyncread, zasyncwrite, ResolveFuture, Wait}; use zenoh_protocol::core::{EndPoint, Locator}; use zenoh_protocol::transport::BatchSize; use zenoh_runtime::ZRuntime; @@ -331,7 +331,7 @@ impl UnicastPipeListener { fn stop_listening(self) { self.token.cancel(); - let _ = ResolveFuture::new(self.handle).res_sync(); + let _ = ResolveFuture::new(self.handle).wait(); } } diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index 5db79b57bd..761f653064 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -29,7 +29,7 @@ //! ``` //! use std::sync::Arc; //! use async_trait::async_trait; -//! use zenoh::prelude::r#async::*; +//! use zenoh::prelude::*; //! use zenoh_backend_traits::*; //! use zenoh_backend_traits::config::*; //! diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index f1deae363d..3c84e039a8 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -28,7 +28,6 @@ use zenoh::runtime::Runtime; use zenoh::sample::Sample; use zenoh::session::SessionDeclarations; use zenoh_core::zlock; -use zenoh_core::AsyncResolve; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; use zenoh_result::{bail, ZResult}; @@ -147,7 +146,7 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { zenoh_util::try_init_log_from_env(); // create a zenoh Session that shares the same Runtime than zenohd - let session = zenoh::session::init(runtime).res().await.unwrap(); + let session = zenoh::session::init(runtime).await.unwrap(); // the HasMap used as a storage by this example of storage plugin let mut stored: HashMap = HashMap::new(); @@ -156,11 +155,11 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { // This storage plugin subscribes to the selector and will store in HashMap the received samples debug!("Create Subscriber on {}", selector); - let sub = session.declare_subscriber(&selector).res().await.unwrap(); + let sub = session.declare_subscriber(&selector).await.unwrap(); // This storage plugin declares a Queryable that will reply to queries with the samples stored in the HashMap debug!("Create Queryable on {}", selector); - let queryable = session.declare_queryable(&selector).res().await.unwrap(); + let queryable = session.declare_queryable(&selector).await.unwrap(); // Plugin's event loop, while the flag is true while flag.load(Relaxed) { @@ -178,7 +177,7 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { info!("Handling query '{}'", query.selector()); for (key_expr, sample) in stored.iter() { if 
query.selector().key_expr().intersects(unsafe{keyexpr::from_str_unchecked(key_expr)}) { - query.reply_sample(sample.clone()).res().await.unwrap(); + query.reply_sample(sample.clone()).await.unwrap(); } } } diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index 5e5485d0d2..59562391ea 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -14,7 +14,7 @@ use clap::{arg, Command}; use std::time::Duration; use zenoh::config::Config; -use zenoh::core::{try_init_log_from_env, AsyncResolve}; +use zenoh::core::try_init_log_from_env; use zenoh::key_expr::keyexpr; use zenoh::publication::CongestionControl; use zenoh::sample::QoSBuilderTrait; @@ -43,16 +43,16 @@ async fn main() { let value = "Pub from sse server!"; println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring Queryable on '{key}'..."); - let queryable = session.declare_queryable(key).res().await.unwrap(); + let queryable = session.declare_queryable(key).await.unwrap(); async_std::task::spawn({ let receiver = queryable.handler().clone(); async move { while let Ok(request) = receiver.recv_async().await { - request.reply(key, HTML).res().await.unwrap(); + request.reply(key, HTML).await.unwrap(); } } }); @@ -63,7 +63,6 @@ async fn main() { let publisher = session .declare_publisher(&event_key) .congestion_control(CongestionControl::Block) - .res() .await .unwrap(); @@ -74,7 +73,7 @@ async fn main() { println!("Data updates are accessible through HTML5 SSE at http://:8000/{key}"); loop { - publisher.put(value).res().await.unwrap(); + publisher.put(value).await.unwrap(); async_std::task::sleep(Duration::from_secs(1)).await; } } diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 7fe591e3f7..c712a1add6 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -30,7 +30,7 @@ use tide::http::Mime; use tide::sse::Sender; use tide::{Request, Response, Server, StatusCode}; use zenoh::bytes::{StringOrBase64, ZBytes}; -use zenoh::core::{try_init_log_from_env, AsyncResolve}; +use zenoh::core::try_init_log_from_env; use zenoh::encoding::Encoding; use zenoh::key_expr::{keyexpr, KeyExpr}; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; @@ -350,13 +350,7 @@ async fn query(mut req: Request<(Arc, String)>) -> tide::Result, String)>) -> tide::Result, String)>) -> tide::Result, String)>) -> tide::Result { if raw { Ok(to_raw_response(receiver).await) @@ -470,8 +464,8 @@ async fn write(mut req: Request<(Arc, String)>) -> tide::Result session.put(&key_expr, bytes).encoding(encoding).res().await, - SampleKind::Delete => session.delete(&key_expr).res().await, + SampleKind::Put => session.put(&key_expr, bytes).encoding(encoding).await, + SampleKind::Delete => session.delete(&key_expr).await, }; match res { Ok(_) => Ok(Response::new(StatusCode::Ok)), @@ -497,7 +491,7 @@ pub async fn run(runtime: Runtime, conf: Config) -> ZResult<()> { try_init_log_from_env(); let zid = runtime.zid().to_string(); - let session = zenoh::session::init(runtime).res().await.unwrap(); + let session = zenoh::session::init(runtime).await.unwrap(); let mut app = Server::with_state((Arc::new(session), zid)); app.with( diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index e5ca51c5ef..8818d44688 100644 --- 
a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -29,7 +29,6 @@ use std::sync::Mutex; use storages_mgt::StorageMessage; use zenoh::core::try_init_log_from_env; use zenoh::core::Result as ZResult; -use zenoh::core::SyncResolve; use zenoh::internal::zlock; use zenoh::internal::LibLoader; use zenoh::key_expr::keyexpr; @@ -51,6 +50,8 @@ use zenoh_plugin_trait::PluginStatusRec; mod backends_mgt; use backends_mgt::*; +use zenoh::prelude::Wait; + mod memory_backend; mod replica; mod storages_mgt; @@ -117,7 +118,7 @@ impl StorageRuntimeInner { let plugins_manager = PluginsManager::dynamic(lib_loader.clone(), BACKEND_LIB_PREFIX) .declare_static_plugin::(true); - let session = Arc::new(zenoh::session::init(runtime.clone()).res_sync()?); + let session = Arc::new(zenoh::session::init(runtime.clone()).wait()?); // After this moment result should be only Ok. Failure of loading of one voulme or storage should not affect others. diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index c24639b6ca..694e259a18 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -18,7 +18,7 @@ use std::cmp::Ordering; use std::collections::{BTreeSet, HashMap, HashSet}; use std::str; use std::str::FromStr; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; pub struct AlignQueryable { session: Arc, @@ -68,7 +68,6 @@ impl AlignQueryable { .session .declare_queryable(&self.digest_key) .complete(true) // This queryable is meant to have all the history - .res() .await .unwrap(); @@ -97,7 +96,6 @@ impl AlignQueryable { query.key_expr().clone(), serde_json::to_string(&(i, c)).unwrap(), ) - .res() .await .unwrap(); } @@ -107,7 +105,6 @@ impl AlignQueryable { query.key_expr().clone(), serde_json::to_string(&(i, c)).unwrap(), ) - .res() .await .unwrap(); } @@ -117,7 +114,6 @@ impl AlignQueryable { query.key_expr().clone(), serde_json::to_string(&(i, c)).unwrap(), ) - .res() .await .unwrap(); } @@ -126,7 +122,6 @@ impl AlignQueryable { .reply(k, v.payload().clone()) .encoding(v.encoding().clone()) .timestamp(ts) - .res() .await .unwrap(); } @@ -226,7 +221,7 @@ impl AlignQueryable { impl AlignQueryable { async fn get_entry(&self, logentry: &LogEntry) -> Option { // get corresponding key from log - let replies = self.session.get(&logentry.key).res().await.unwrap(); + let replies = self.session.get(&logentry.key).await.unwrap(); if let Ok(reply) = replies.recv_async().await { match reply.into_result() { Ok(sample) => { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 3a9fd00558..46ccdc2935 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -18,7 +18,7 @@ use async_std::sync::{Arc, RwLock}; use flume::{Receiver, Sender}; use std::collections::{HashMap, HashSet}; use std::str; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; pub struct Aligner { session: Arc, @@ -322,7 +322,6 @@ impl Aligner { .get(&selector) .consolidation(zenoh::query::ConsolidationMode::None) .accept_replies(zenoh::query::ReplyKeyExpr::Any) - .res() .await { Ok(replies) => { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index 
b951b23336..c9d9e03bcf 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -26,7 +26,7 @@ use std::str; use std::str::FromStr; use std::time::{Duration, SystemTime}; use urlencoding::encode; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_backend_traits::config::{ReplicaConfig, StorageConfig}; pub mod align_queryable; @@ -206,7 +206,6 @@ impl Replica { .session .declare_subscriber(&digest_key) .allowed_origin(Locality::Remote) - .res() .await .unwrap(); loop { @@ -265,12 +264,7 @@ impl Replica { .unwrap(); tracing::debug!("[DIGEST_PUB] Declaring Publisher on '{}'...", digest_key); - let publisher = self - .session - .declare_publisher(digest_key) - .res() - .await - .unwrap(); + let publisher = self.session.declare_publisher(digest_key).await.unwrap(); // Ensure digest gets published every interval, accounting for // time it takes to publish. @@ -287,7 +281,7 @@ impl Replica { drop(digest); tracing::trace!("[DIGEST_PUB] Putting Digest: {} ...", digest_json); - match publisher.put(digest_json).res().await { + match publisher.put(digest_json).await { Ok(()) => {} Err(e) => tracing::error!("[DIGEST_PUB] Digest publication failed: {}", e), } diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index ba078c0012..476893539e 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -23,7 +23,6 @@ use std::str::{self, FromStr}; use std::time::{SystemTime, UNIX_EPOCH}; use zenoh::buffers::SplitBuffer; use zenoh::buffers::ZBuf; -use zenoh::core::AsyncResolve; use zenoh::internal::bail; use zenoh::internal::{zenoh_home, Timed, TimedEvent, Timer}; use zenoh::key_expr::keyexpr_tree::KeyedSetProvider; @@ -144,7 +143,7 @@ impl StorageService { t.add_async(gc).await; // subscribe on key_expr - let storage_sub = match self.session.declare_subscriber(&self.key_expr).res().await { + let storage_sub = match self.session.declare_subscriber(&self.key_expr).await { Ok(storage_sub) => storage_sub, Err(e) => { tracing::error!("Error starting storage '{}': {}", self.name, e); @@ -157,7 +156,6 @@ impl StorageService { .session .declare_queryable(&self.key_expr) .complete(self.complete) - .res() .await { Ok(storage_queryable) => storage_queryable, @@ -522,7 +520,6 @@ impl StorageService { .reply(key.clone(), entry.value.payload().clone()) .encoding(entry.value.encoding().clone()) .timestamp(entry.timestamp) - .res() .await { tracing::warn!( @@ -556,7 +553,6 @@ impl StorageService { .reply(q.key_expr().clone(), entry.value.payload().clone()) .encoding(entry.value.encoding().clone()) .timestamp(entry.timestamp) - .res() .await { tracing::warn!( @@ -644,7 +640,6 @@ impl StorageService { .get(Selector::new(&self.key_expr, "_time=[..]")) .target(QueryTarget::All) .consolidation(ConsolidationMode::None) - .res() .await { Ok(replies) => replies, diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index 0678431b7e..b5384e13be 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -21,29 +21,23 @@ use std::thread::sleep; use async_std::task; use zenoh::internal::zasync_executor_init; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_plugin_trait::Plugin; async fn put_data(session: &Session, 
key_expr: &str, value: &str, _timestamp: Timestamp) { println!("Putting Data ('{key_expr}': '{value}')..."); // @TODO: how to add timestamp metadata with put, not manipulating sample... - session.put(key_expr, value).res().await.unwrap(); + session.put(key_expr, value).await.unwrap(); } async fn delete_data(session: &Session, key_expr: &str, _timestamp: Timestamp) { println!("Deleting Data '{key_expr}'..."); // @TODO: how to add timestamp metadata with delete, not manipulating sample... - session.delete(key_expr).res().await.unwrap(); + session.delete(key_expr).await.unwrap(); } async fn get_data(session: &Session, key_expr: &str) -> Vec { - let replies: Vec = session - .get(key_expr) - .res() - .await - .unwrap() - .into_iter() - .collect(); + let replies: Vec = session.get(key_expr).await.unwrap().into_iter().collect(); println!("Getting replies on '{key_expr}': '{replies:?}'..."); let mut samples = Vec::new(); for reply in replies { @@ -80,7 +74,7 @@ async fn test_updates_in_order() { let storage = zenoh_plugin_storage_manager::StoragesPlugin::start("storage-manager", &runtime).unwrap(); - let session = zenoh::session::init(runtime).res().await.unwrap(); + let session = zenoh::session::init(runtime).await.unwrap(); sleep(std::time::Duration::from_secs(1)); diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index 72fa62f3ca..bd38e834d7 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -22,29 +22,23 @@ use std::thread::sleep; // use std::collections::HashMap; use async_std::task; use zenoh::internal::zasync_executor_init; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_plugin_trait::Plugin; async fn put_data(session: &Session, key_expr: &str, value: &str, _timestamp: Timestamp) { println!("Putting Data ('{key_expr}': '{value}')..."); // @TODO: how to add timestamp metadata with put, not manipulating sample... - session.put(key_expr, value).res().await.unwrap(); + session.put(key_expr, value).await.unwrap(); } async fn delete_data(session: &Session, key_expr: &str, _timestamp: Timestamp) { println!("Deleting Data '{key_expr}'..."); // @TODO: how to add timestamp metadata with delete, not manipulating sample... 
- session.delete(key_expr).res().await.unwrap(); + session.delete(key_expr).await.unwrap(); } async fn get_data(session: &Session, key_expr: &str) -> Vec { - let replies: Vec = session - .get(key_expr) - .res() - .await - .unwrap() - .into_iter() - .collect(); + let replies: Vec = session.get(key_expr).await.unwrap().into_iter().collect(); println!("Getting replies on '{key_expr}': '{replies:?}'..."); let mut samples = Vec::new(); for reply in replies { @@ -81,7 +75,7 @@ async fn test_wild_card_in_order() { let storage = zenoh_plugin_storage_manager::StoragesPlugin::start("storage-manager", &runtime).unwrap(); - let session = zenoh::session::init(runtime).res().await.unwrap(); + let session = zenoh::session::init(runtime).await.unwrap(); sleep(std::time::Duration::from_secs(1)); // put *, ts: 1 diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 743f7cd993..b7eadd649b 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,2 @@ [toolchain] -channel = "1.72.0" +channel = "1.72.0" \ No newline at end of file diff --git a/zenoh-ext/examples/examples/z_member.rs b/zenoh-ext/examples/examples/z_member.rs index 2dc1a242c9..35513b1b56 100644 --- a/zenoh-ext/examples/examples/z_member.rs +++ b/zenoh-ext/examples/examples/z_member.rs @@ -14,13 +14,13 @@ use futures::StreamExt; use std::sync::Arc; use std::time::Duration; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_ext::group::*; #[tokio::main] async fn main() { zenoh_util::try_init_log_from_env(); - let z = Arc::new(zenoh::open(Config::default()).res().await.unwrap()); + let z = Arc::new(zenoh::open(Config::default()).await.unwrap()); let member = Member::new(z.zid().to_string()) .unwrap() .lease(Duration::from_secs(3)); diff --git a/zenoh-ext/examples/examples/z_pub_cache.rs b/zenoh-ext/examples/examples/z_pub_cache.rs index 58eb7962c9..09c888cb0b 100644 --- a/zenoh-ext/examples/examples/z_pub_cache.rs +++ b/zenoh-ext/examples/examples/z_pub_cache.rs @@ -14,7 +14,7 @@ use clap::{arg, Parser}; use std::time::Duration; use zenoh::config::{Config, ModeDependentValue}; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_ext::*; use zenoh_ext_examples::CommonArgs; @@ -26,7 +26,7 @@ async fn main() { let (config, key_expr, value, history, prefix, complete) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring PublicationCache on {}", &key_expr); let mut publication_cache_builder = session @@ -36,14 +36,14 @@ async fn main() { if let Some(prefix) = prefix { publication_cache_builder = publication_cache_builder.queryable_prefix(prefix); } - let _publication_cache = publication_cache_builder.res().await.unwrap(); + let _publication_cache = publication_cache_builder.await.unwrap(); println!("Press CTRL-C to quit..."); for idx in 0..u32::MAX { tokio::time::sleep(Duration::from_secs(1)).await; let buf = format!("[{idx:4}] {value}"); println!("Put Data ('{}': '{}')", &key_expr, buf); - session.put(&key_expr, buf).res().await.unwrap(); + session.put(&key_expr, buf).await.unwrap(); } } diff --git a/zenoh-ext/examples/examples/z_query_sub.rs b/zenoh-ext/examples/examples/z_query_sub.rs index b34a5771a7..a735ecec66 100644 --- a/zenoh-ext/examples/examples/z_query_sub.rs +++ b/zenoh-ext/examples/examples/z_query_sub.rs @@ -14,7 +14,7 @@ use clap::arg; use clap::Parser; use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; use zenoh_ext::*; use 
zenoh_ext_examples::CommonArgs; @@ -26,7 +26,7 @@ async fn main() { let (config, key_expr, query) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!( "Declaring QueryingSubscriber on {} with an initial query on {}", @@ -39,14 +39,12 @@ async fn main() { .querying() .query_selector(&selector) .query_accept_replies(ReplyKeyExpr::Any) - .res() .await .unwrap() } else { session .declare_subscriber(key_expr) .querying() - .res() .await .unwrap() }; diff --git a/zenoh-ext/examples/examples/z_view_size.rs b/zenoh-ext/examples/examples/z_view_size.rs index 66e79cd301..52e78790bb 100644 --- a/zenoh-ext/examples/examples/z_view_size.rs +++ b/zenoh-ext/examples/examples/z_view_size.rs @@ -15,7 +15,6 @@ use clap::{arg, Parser}; use std::sync::Arc; use std::time::Duration; use zenoh::config::Config; -use zenoh::prelude::r#async::*; use zenoh_ext::group::*; use zenoh_ext_examples::CommonArgs; @@ -25,7 +24,7 @@ async fn main() { let (config, group_name, id, size, timeout) = parse_args(); - let z = Arc::new(zenoh::open(config).res().await.unwrap()); + let z = Arc::new(zenoh::open(config).await.unwrap()); let member_id = id.unwrap_or_else(|| z.zid().to_string()); let member = Member::new(member_id.as_str()) .unwrap() diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 1bf37f365c..d764e5ed9c 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -25,7 +25,7 @@ use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::sync::Mutex; use zenoh::internal::{bail, Condition, TaskController}; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; const GROUP_PREFIX: &str = "zenoh/ext/net/group"; const EVENT_POSTFIX: &str = "evt"; @@ -177,7 +177,7 @@ async fn keep_alive_task(state: Arc) { loop { tokio::time::sleep(period).await; tracing::trace!("Sending Keep Alive for: {}", &state.local_member.mid); - let _ = state.group_publisher.put(buf.clone()).res().await; + let _ = state.group_publisher.put(buf.clone()).await; } } @@ -219,18 +219,17 @@ async fn query_handler(z: Arc, state: Arc) { .unwrap(); tracing::debug!("Started query handler for: {}", &qres); let buf = bincode::serialize(&state.local_member).unwrap(); - let queryable = z.declare_queryable(&qres).res().await.unwrap(); + let queryable = z.declare_queryable(&qres).await.unwrap(); while let Ok(query) = queryable.recv_async().await { tracing::trace!("Serving query for: {}", &qres); - query.reply(qres.clone(), buf.clone()).res().await.unwrap(); + query.reply(qres.clone(), buf.clone()).await.unwrap(); } } async fn net_event_handler(z: Arc, state: Arc) { let sub = z .declare_subscriber(state.group_publisher.key_expr()) - .res() .await .unwrap(); while let Ok(s) = sub.recv_async().await { @@ -288,7 +287,7 @@ async fn net_event_handler(z: Arc, state: Arc) { // @TODO: we could also send this member info let qc = ConsolidationMode::None; tracing::trace!("Issuing Query for {}", &qres); - let receiver = z.get(&qres).consolidation(qc).res().await.unwrap(); + let receiver = z.get(&qres).consolidation(qc).await.unwrap(); while let Ok(reply) = receiver.recv_async().await { match reply.result() { @@ -358,7 +357,6 @@ impl Group { let publisher = z .declare_publisher(event_expr) .priority(with.priority) - .res() .await .unwrap(); let state = Arc::new(GroupState { @@ -375,7 +373,7 @@ impl Group { tracing::debug!("Sending Join Message for local member: {:?}", &with); let join_evt = GroupNetEvent::Join(JoinEvent { member: with 
}); let buf = bincode::serialize(&join_evt).unwrap(); - let _ = state.group_publisher.put(buf).res().await; + let _ = state.group_publisher.put(buf).await; let task_controller = TaskController::default(); // If the liveliness is manual it is the user who has to assert it. diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 7080b44ac4..11fb8fb72a 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -13,12 +13,13 @@ // use std::collections::{HashMap, VecDeque}; use std::convert::TryInto; -use std::future::Ready; +use std::future::{IntoFuture, Ready}; use std::time::Duration; use zenoh::core::Error; -use zenoh::core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; +use zenoh::core::{Resolvable, Resolve}; use zenoh::internal::{ResolveFuture, TerminatableTask}; use zenoh::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; +use zenoh::prelude::Wait; use zenoh::queryable::{Query, Queryable}; use zenoh::runtime::ZRuntime; use zenoh::sample::{Locality, Sample}; @@ -96,17 +97,18 @@ impl<'a> Resolvable for PublicationCacheBuilder<'a, '_, '_> { type To = ZResult>; } -impl SyncResolve for PublicationCacheBuilder<'_, '_, '_> { - fn res_sync(self) -> ::To { +impl Wait for PublicationCacheBuilder<'_, '_, '_> { + fn wait(self) -> ::To { PublicationCache::new(self) } } -impl<'a> AsyncResolve for PublicationCacheBuilder<'a, '_, '_> { - type Future = Ready; +impl<'a> IntoFuture for PublicationCacheBuilder<'a, '_, '_> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -149,7 +151,7 @@ impl<'a> PublicationCache<'a> { .session .declare_subscriber(&key_expr) .allowed_origin(Locality::SessionLocal) - .res_sync()?; + .wait()?; // declare the queryable which returns the cached publications let mut queryable = conf.session.declare_queryable(&queryable_key_expr); @@ -159,7 +161,7 @@ impl<'a> PublicationCache<'a> { if let Some(complete) = conf.complete { queryable = queryable.complete(complete); } - let queryable = queryable.res_sync()?; + let queryable = queryable.wait()?; // take local ownership of stuff to be moved into task let sub_recv = local_sub.handler().clone(); @@ -215,7 +217,7 @@ impl<'a> PublicationCache<'a> { continue; } } - if let Err(e) = query.reply_sample(sample.clone()).res_async().await { + if let Err(e) = query.reply_sample(sample.clone()).await { tracing::warn!("Error replying to query: {}", e); } } @@ -229,7 +231,7 @@ impl<'a> PublicationCache<'a> { continue; } } - if let Err(e) = query.reply_sample(sample.clone()).res_async().await { + if let Err(e) = query.reply_sample(sample.clone()).await { tracing::warn!("Error replying to query: {}", e); } } @@ -261,8 +263,8 @@ impl<'a> PublicationCache<'a> { local_sub, task, } = self; - _queryable.undeclare().res_async().await?; - local_sub.undeclare().res_async().await?; + _queryable.undeclare().await?; + local_sub.undeclare().await?; task.terminate(Duration::from_secs(10)); Ok(()) }) diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 35eb9afe46..6febef7395 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -13,14 +13,15 @@ // use std::collections::{btree_map, BTreeMap, VecDeque}; use std::convert::TryInto; -use std::future::Ready; +use std::future::{IntoFuture, Ready}; use std::mem::swap; use std::sync::{Arc, Mutex}; use 
std::time::Duration; -use zenoh::core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; +use zenoh::core::{Resolvable, Resolve}; use zenoh::handlers::{locked, DefaultHandler, IntoHandler}; use zenoh::internal::zlock; use zenoh::key_expr::KeyExpr; +use zenoh::prelude::Wait; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; use zenoh::sample::{Locality, Sample, SampleBuilder, TimestampBuilderTrait}; use zenoh::selector::Selector; @@ -223,13 +224,13 @@ where type To = ZResult>; } -impl SyncResolve for QueryingSubscriberBuilder<'_, '_, KeySpace, Handler> +impl Wait for QueryingSubscriberBuilder<'_, '_, KeySpace, Handler> where KeySpace: Into + Clone, Handler: IntoHandler<'static, Sample> + Send, Handler::Handler: Send, { - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let session = self.session.clone(); let key_expr = self.key_expr?; let key_space = self.key_space.clone().into(); @@ -257,31 +258,32 @@ where .consolidation(query_consolidation) .accept_replies(query_accept_replies) .timeout(query_timeout) - .res_sync(), + .wait(), crate::KeySpace::Liveliness => session .liveliness() .get(key_expr) .callback(cb) .timeout(query_timeout) - .res_sync(), + .wait(), }, handler: self.handler, phantom: std::marker::PhantomData, } - .res_sync() + .wait() } } -impl<'a, KeySpace, Handler> AsyncResolve for QueryingSubscriberBuilder<'a, '_, KeySpace, Handler> +impl<'a, KeySpace, Handler> IntoFuture for QueryingSubscriberBuilder<'a, '_, KeySpace, Handler> where KeySpace: Into + Clone, Handler: IntoHandler<'static, Sample> + Send, Handler::Handler: Send, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -551,14 +553,14 @@ impl< Handler, Fetch: FnOnce(Box) -> ZResult<()> + Send + Sync, TryIntoSample, - > SyncResolve for FetchingSubscriberBuilder<'_, '_, KeySpace, Handler, Fetch, TryIntoSample> + > Wait for FetchingSubscriberBuilder<'_, '_, KeySpace, Handler, Fetch, TryIntoSample> where KeySpace: Into, Handler: IntoHandler<'static, Sample> + Send, Handler::Handler: Send, TryIntoSample: ExtractSample + Send + Sync, { - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { FetchingSubscriber::new(self.with_static_keys()) } } @@ -569,17 +571,18 @@ impl< Handler, Fetch: FnOnce(Box) -> ZResult<()> + Send + Sync, TryIntoSample, - > AsyncResolve for FetchingSubscriberBuilder<'a, '_, KeySpace, Handler, Fetch, TryIntoSample> + > IntoFuture for FetchingSubscriberBuilder<'a, '_, KeySpace, Handler, Fetch, TryIntoSample> where KeySpace: Into, Handler: IntoHandler<'static, Sample> + Send, Handler::Handler: Send, TryIntoSample: ExtractSample + Send + Sync, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -595,20 +598,18 @@ where /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// use zenoh_ext::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expr") /// .fetching( |cb| { -/// use zenoh::prelude::sync::SyncResolve; /// session /// .get("key/expr") /// .callback(cb) -/// .res_sync() +/// .wait() /// }) 
-/// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { @@ -689,13 +690,13 @@ impl<'a, Handler> FetchingSubscriber<'a, Handler> { .callback(sub_callback) .reliability(conf.reliability) .allowed_origin(conf.origin) - .res_sync()?, + .wait()?, crate::KeySpace::Liveliness => conf .session .liveliness() .declare_subscriber(&key_expr) .callback(sub_callback) - .res_sync()?, + .wait()?, }; let fetch_subscriber = FetchingSubscriber { @@ -732,10 +733,10 @@ impl<'a, Handler> FetchingSubscriber<'a, Handler> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let mut subscriber = session /// .declare_subscriber("key/expr") /// .fetching( |cb| { @@ -743,9 +744,8 @@ impl<'a, Handler> FetchingSubscriber<'a, Handler> { /// session /// .get("key/expr") /// .callback(cb) - /// .res_sync() + /// .wait() /// }) - /// .res() /// .await /// .unwrap(); /// @@ -756,9 +756,8 @@ impl<'a, Handler> FetchingSubscriber<'a, Handler> { /// session /// .get("key/expr") /// .callback(cb) - /// .res_sync() + /// .wait() /// }) - /// .res() /// .await /// .unwrap(); /// # } @@ -814,10 +813,10 @@ impl Drop for RepliesHandler { /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// # use zenoh::prelude::r#async::*; +/// # use zenoh::prelude::*; /// # use zenoh_ext::*; /// # -/// # let session = zenoh::open(config::peer()).res().await.unwrap(); +/// # let session = zenoh::open(config::peer()).await.unwrap(); /// # let mut fetching_subscriber = session /// # .declare_subscriber("key/expr") /// # .fetching( |cb| { @@ -825,9 +824,8 @@ impl Drop for RepliesHandler { /// # session /// # .get("key/expr") /// # .callback(cb) -/// # .res_sync() +/// # .wait() /// # }) -/// # .res() /// # .await /// # .unwrap(); /// # @@ -837,9 +835,8 @@ impl Drop for RepliesHandler { /// session /// .get("key/expr") /// .callback(cb) -/// .res_sync() +/// .wait() /// }) -/// .res() /// .await /// .unwrap(); /// # } @@ -865,26 +862,27 @@ where type To = ZResult<()>; } -impl) -> ZResult<()>, TryIntoSample> - SyncResolve for FetchBuilder +impl) -> ZResult<()>, TryIntoSample> Wait + for FetchBuilder where TryIntoSample: ExtractSample, { - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let handler = register_handler(self.state, self.callback); run_fetch(self.fetch, handler) } } impl) -> ZResult<()>, TryIntoSample> - AsyncResolve for FetchBuilder + IntoFuture for FetchBuilder where TryIntoSample: ExtractSample, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } diff --git a/zenoh-ext/src/session_ext.rs b/zenoh-ext/src/session_ext.rs index 3f23239b29..d005cafc86 100644 --- a/zenoh-ext/src/session_ext.rs +++ b/zenoh-ext/src/session_ext.rs @@ -62,14 +62,14 @@ impl<'s> SessionExt<'s, 'static> for Arc { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// use zenoh::config::ModeDependentValue::Unique; /// use zenoh_ext::SessionExt; /// /// let mut config = config::default(); /// config.timestamping.set_enabled(Some(Unique(true))); - /// let session = zenoh::open(config).res().await.unwrap().into_arc(); - /// let 
publication_cache = session.declare_publication_cache("key/expression").res().await.unwrap(); + /// let session = zenoh::open(config).await.unwrap().into_arc(); + /// let publication_cache = session.declare_publication_cache("key/expression").await.unwrap(); /// tokio::task::spawn(async move { /// publication_cache.key_expr(); /// }).await; diff --git a/zenoh-ext/src/subscriber_ext.rs b/zenoh-ext/src/subscriber_ext.rs index 3176745c95..8c3b1239b6 100644 --- a/zenoh-ext/src/subscriber_ext.rs +++ b/zenoh-ext/src/subscriber_ext.rs @@ -60,10 +60,10 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expr") /// .fetching( |cb| { @@ -71,9 +71,8 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { /// session /// .get("key/expr") /// .callback(cb) - /// .res_sync() + /// .wait() /// }) - /// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { @@ -106,14 +105,13 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expr") /// .querying() - /// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { @@ -141,10 +139,10 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> for SubscriberBuilde /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expr") /// .fetching( |cb| { @@ -152,9 +150,8 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> for SubscriberBuilde /// session /// .get("key/expr") /// .callback(cb) - /// .res_sync() + /// .wait() /// }) - /// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { @@ -199,14 +196,13 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> for SubscriberBuilde /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expr") /// .querying() - /// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { @@ -254,10 +250,10 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .liveliness() /// .declare_subscriber("key/expr") 
@@ -267,9 +263,8 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// .liveliness() /// .get("key/expr") /// .callback(cb) - /// .res_sync() + /// .wait() /// }) - /// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { @@ -315,15 +310,14 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .liveliness() /// .declare_subscriber("key/expr") /// .querying() - /// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { diff --git a/zenoh/src/api/admin.rs b/zenoh/src/api/admin.rs index c221d7f27c..e720fde1c3 100644 --- a/zenoh/src/api/admin.rs +++ b/zenoh/src/api/admin.rs @@ -25,7 +25,7 @@ use std::{ hash::{Hash, Hasher}, sync::Arc, }; -use zenoh_core::{Result as ZResult, SyncResolve}; +use zenoh_core::{Result as ZResult, Wait}; use zenoh_keyexpr::keyexpr; use zenoh_protocol::{core::WireExpr, network::NetworkMessage}; use zenoh_transport::{ @@ -72,7 +72,7 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { if let Ok(value) = serde_json::value::to_value(peer.clone()) { match ZBytes::try_from(value) { Ok(zbuf) => { - let _ = query.reply(key_expr, zbuf).res_sync(); + let _ = query.reply(key_expr, zbuf).wait(); } Err(e) => tracing::debug!("Admin query error: {}", e), } @@ -89,7 +89,7 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { if let Ok(value) = serde_json::value::to_value(link) { match ZBytes::try_from(value) { Ok(zbuf) => { - let _ = query.reply(key_expr, zbuf).res_sync(); + let _ = query.reply(key_expr, zbuf).wait(); } Err(e) => tracing::debug!("Admin query error: {}", e), } diff --git a/zenoh/src/api/builders/publication.rs b/zenoh/src/api/builders/publication.rs index 711cb063f6..5285825b29 100644 --- a/zenoh/src/api/builders/publication.rs +++ b/zenoh/src/api/builders/publication.rs @@ -1,5 +1,3 @@ -use std::future::Ready; - // // Copyright (c) 2024 ZettaScale Technology // @@ -27,7 +25,8 @@ use crate::api::sample::SourceInfo; use crate::api::session::SessionRef; use crate::api::value::Value; use crate::api::{encoding::Encoding, publication::Publisher}; -use zenoh_core::{AsyncResolve, Resolvable, Result as ZResult, SyncResolve}; +use std::future::{IntoFuture, Ready}; +use zenoh_core::{Resolvable, Result as ZResult, Wait}; use zenoh_protocol::core::CongestionControl; use zenoh_protocol::network::Mapping; @@ -57,14 +56,13 @@ pub struct PublicationBuilderDelete; /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// session /// .put("key/expression", "payload") /// .encoding(Encoding::TEXT_PLAIN) /// .congestion_control(CongestionControl::Block) -/// .res() /// .await /// .unwrap(); /// # } @@ -179,9 +177,9 @@ impl Resolvable for PublicationBuilder { type To = ZResult<()>; } -impl SyncResolve for PublicationBuilder, PublicationBuilderPut> { +impl Wait for PublicationBuilder, PublicationBuilderPut> { #[inline] - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let publisher = self.publisher.create_one_shot_publisher()?; publisher.resolve_put( 
self.kind.payload, @@ -196,9 +194,9 @@ impl SyncResolve for PublicationBuilder, PublicationBui } } -impl SyncResolve for PublicationBuilder, PublicationBuilderDelete> { +impl Wait for PublicationBuilder, PublicationBuilderDelete> { #[inline] - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let publisher = self.publisher.create_one_shot_publisher()?; publisher.resolve_put( ZBytes::empty(), @@ -213,19 +211,21 @@ impl SyncResolve for PublicationBuilder, PublicationBui } } -impl AsyncResolve for PublicationBuilder, PublicationBuilderPut> { - type Future = Ready; +impl IntoFuture for PublicationBuilder, PublicationBuilderPut> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } -impl AsyncResolve for PublicationBuilder, PublicationBuilderDelete> { - type Future = Ready; +impl IntoFuture for PublicationBuilder, PublicationBuilderDelete> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -235,13 +235,12 @@ impl AsyncResolve for PublicationBuilder, PublicationBu /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let publisher = session /// .declare_publisher("key/expression") /// .congestion_control(CongestionControl::Block) -/// .res() /// .await /// .unwrap(); /// # } @@ -327,12 +326,12 @@ impl<'a, 'b> Resolvable for PublisherBuilder<'a, 'b> { type To = ZResult>; } -impl<'a, 'b> SyncResolve for PublisherBuilder<'a, 'b> { - fn res_sync(self) -> ::To { +impl<'a, 'b> Wait for PublisherBuilder<'a, 'b> { + fn wait(self) -> ::To { let mut key_expr = self.key_expr?; if !key_expr.is_fully_optimized(&self.session) { let session_id = self.session.id; - let expr_id = self.session.declare_prefix(key_expr.as_str()).res_sync(); + let expr_id = self.session.declare_prefix(key_expr.as_str()).wait(); let prefix_len = key_expr .len() .try_into() @@ -362,7 +361,7 @@ impl<'a, 'b> SyncResolve for PublisherBuilder<'a, 'b> { } self.session .declare_publication_intent(key_expr.clone()) - .res_sync()?; + .wait()?; #[cfg(feature = "unstable")] let eid = self.session.runtime.next_id(); let publisher = Publisher { @@ -380,16 +379,17 @@ impl<'a, 'b> SyncResolve for PublisherBuilder<'a, 'b> { } } -impl<'a, 'b> AsyncResolve for PublisherBuilder<'a, 'b> { - type Future = Ready; +impl<'a, 'b> IntoFuture for PublisherBuilder<'a, 'b> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } -impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { - fn res_sync(self) -> ::To { +impl Wait for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { + fn wait(self) -> ::To { self.publisher.resolve_put( self.kind.payload, SampleKind::Put, @@ -403,8 +403,8 @@ impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { } } -impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { - fn res_sync(self) -> ::To { +impl Wait for PublicationBuilder<&Publisher<'_>, 
PublicationBuilderDelete> { + fn wait(self) -> ::To { self.publisher.resolve_put( ZBytes::empty(), SampleKind::Delete, @@ -418,18 +418,20 @@ impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete } } -impl AsyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { - type Future = Ready; +impl IntoFuture for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } -impl AsyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { - type Future = Ready; +impl IntoFuture for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } diff --git a/zenoh/src/api/info.rs b/zenoh/src/api/info.rs index dbcad9c50c..a6f8ff1629 100644 --- a/zenoh/src/api/info.rs +++ b/zenoh/src/api/info.rs @@ -14,8 +14,8 @@ //! Tools to access information about the current zenoh [`Session`](crate::Session). use super::session::SessionRef; -use std::future::Ready; -use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; +use std::future::{IntoFuture, Ready}; +use zenoh_core::{Resolvable, Wait}; use zenoh_protocol::core::{WhatAmI, ZenohId}; /// A builder retuned by [`SessionInfo::zid()`](SessionInfo::zid) that allows @@ -25,10 +25,10 @@ use zenoh_protocol::core::{WhatAmI, ZenohId}; /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let zid = session.info().zid().res().await; +/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let zid = session.info().zid().await; /// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] @@ -41,17 +41,18 @@ impl<'a> Resolvable for ZidBuilder<'a> { type To = ZenohId; } -impl<'a> SyncResolve for ZidBuilder<'a> { - fn res_sync(self) -> Self::To { +impl<'a> Wait for ZidBuilder<'a> { + fn wait(self) -> Self::To { self.session.runtime.zid() } } -impl<'a> AsyncResolve for ZidBuilder<'a> { - type Future = Ready; +impl<'a> IntoFuture for ZidBuilder<'a> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -63,10 +64,10 @@ impl<'a> AsyncResolve for ZidBuilder<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let mut routers_zid = session.info().routers_zid().res().await; +/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let mut routers_zid = session.info().routers_zid().await; /// while let Some(router_zid) = routers_zid.next() {} /// # } /// ``` @@ -80,8 +81,8 @@ impl<'a> Resolvable for RoutersZidBuilder<'a> { type To = Box + Send + Sync>; } -impl<'a> SyncResolve for RoutersZidBuilder<'a> { - fn res_sync(self) -> Self::To { +impl<'a> Wait for RoutersZidBuilder<'a> { + fn wait(self) -> Self::To { Box::new( 
zenoh_runtime::ZRuntime::Application .block_in_place(self.session.runtime.manager().get_transports_unicast()) @@ -96,11 +97,12 @@ impl<'a> SyncResolve for RoutersZidBuilder<'a> { } } -impl<'a> AsyncResolve for RoutersZidBuilder<'a> { - type Future = Ready; +impl<'a> IntoFuture for RoutersZidBuilder<'a> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -111,11 +113,11 @@ impl<'a> AsyncResolve for RoutersZidBuilder<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let zid = session.info().zid().res().await; -/// let mut peers_zid = session.info().peers_zid().res().await; +/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let zid = session.info().zid().await; +/// let mut peers_zid = session.info().peers_zid().await; /// while let Some(peer_zid) = peers_zid.next() {} /// # } /// ``` @@ -129,8 +131,8 @@ impl<'a> Resolvable for PeersZidBuilder<'a> { type To = Box + Send + Sync>; } -impl<'a> SyncResolve for PeersZidBuilder<'a> { - fn res_sync(self) -> ::To { +impl<'a> Wait for PeersZidBuilder<'a> { + fn wait(self) -> ::To { Box::new( zenoh_runtime::ZRuntime::Application .block_in_place(self.session.runtime.manager().get_transports_unicast()) @@ -145,11 +147,12 @@ impl<'a> SyncResolve for PeersZidBuilder<'a> { } } -impl<'a> AsyncResolve for PeersZidBuilder<'a> { - type Future = Ready; +impl<'a> IntoFuture for PeersZidBuilder<'a> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -160,11 +163,11 @@ impl<'a> AsyncResolve for PeersZidBuilder<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let info = session.info(); -/// let zid = info.zid().res().await; +/// let zid = info.zid().await; /// # } /// ``` pub struct SessionInfo<'a> { @@ -178,10 +181,10 @@ impl SessionInfo<'_> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let zid = session.info().zid().res().await; + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let zid = session.info().zid().await; /// # } /// ``` pub fn zid(&self) -> ZidBuilder<'_> { @@ -197,10 +200,10 @@ impl SessionInfo<'_> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let mut routers_zid = session.info().routers_zid().res().await; + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let mut routers_zid = session.info().routers_zid().await; /// while let Some(router_zid) = routers_zid.next() {} /// # } /// ``` @@ -216,10 +219,10 @@ impl SessionInfo<'_> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = 
zenoh::open(config::peer()).res().await.unwrap(); - /// let mut peers_zid = session.info().peers_zid().res().await; + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let mut peers_zid = session.info().peers_zid().await; /// while let Some(peer_zid) = peers_zid.next() {} /// # } /// ``` diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index 774cf28790..20dcf9cbee 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -14,12 +14,13 @@ use super::session::{Session, Undeclarable}; use crate::net::primitives::Primitives; +use std::future::IntoFuture; use std::{ convert::{TryFrom, TryInto}, future::Ready, str::FromStr, }; -use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; +use zenoh_core::{Resolvable, Wait}; use zenoh_keyexpr::{keyexpr, OwnedKeyExpr}; use zenoh_protocol::{ core::{key_expr::canon::Canonizable, ExprId, WireExpr}, @@ -562,11 +563,11 @@ impl<'a> Undeclarable<&'a Session, KeyExprUndeclaration<'a>> for KeyExpr<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let key_expr = session.declare_keyexpr("key/expression").res().await.unwrap(); -/// session.undeclare(key_expr).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let key_expr = session.declare_keyexpr("key/expression").await.unwrap(); +/// session.undeclare(key_expr).await.unwrap(); /// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] @@ -579,8 +580,8 @@ impl Resolvable for KeyExprUndeclaration<'_> { type To = ZResult<()>; } -impl SyncResolve for KeyExprUndeclaration<'_> { - fn res_sync(self) -> ::To { +impl Wait for KeyExprUndeclaration<'_> { + fn wait(self) -> ::To { let KeyExprUndeclaration { session, expr } = self; let expr_id = match &expr.0 { KeyExprInner::Wire { @@ -629,11 +630,12 @@ impl SyncResolve for KeyExprUndeclaration<'_> { } } -impl AsyncResolve for KeyExprUndeclaration<'_> { - type Future = Ready; +impl IntoFuture for KeyExprUndeclaration<'_> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index 761704d7d2..f7235426c3 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -21,10 +21,11 @@ use super::{ subscriber::{Subscriber, SubscriberInner}, Id, }; +use std::future::IntoFuture; use std::{convert::TryInto, future::Ready, sync::Arc, time::Duration}; use zenoh_config::unwrap_or_default; -use zenoh_core::Resolve; -use zenoh_core::{AsyncResolve, Resolvable, Result as ZResult, SyncResolve}; +use zenoh_core::{Resolvable, Result as ZResult}; +use zenoh_core::{Resolve, Wait}; use zenoh_keyexpr::keyexpr; use zenoh_protocol::network::{declare::subscriber::ext::SubscriberInfo, request}; @@ -55,13 +56,12 @@ lazy_static::lazy_static!( /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") -/// .res() /// .await /// .unwrap(); /// # } @@ -83,13 
+83,12 @@ impl<'a> Liveliness<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") - /// .res() /// .await /// .unwrap(); /// # } @@ -119,10 +118,10 @@ impl<'a> Liveliness<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let subscriber = session.liveliness().declare_subscriber("key/expression").res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let subscriber = session.liveliness().declare_subscriber("key/expression").await.unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { /// match sample.kind() { /// SampleKind::Put => println!("New liveliness: {}", sample.key_expr()), @@ -157,10 +156,10 @@ impl<'a> Liveliness<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let replies = session.liveliness().get("key/expression").res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let replies = session.liveliness().get("key/expression").await.unwrap(); /// while let Ok(reply) = replies.recv_async().await { /// if let Ok(sample) = reply.result() { /// println!(">> Liveliness token {}", sample.key_expr()); @@ -197,13 +196,12 @@ impl<'a> Liveliness<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") -/// .res() /// .await /// .unwrap(); /// # } @@ -222,9 +220,9 @@ impl<'a> Resolvable for LivelinessTokenBuilder<'a, '_> { } #[zenoh_macros::unstable] -impl SyncResolve for LivelinessTokenBuilder<'_, '_> { +impl Wait for LivelinessTokenBuilder<'_, '_> { #[inline] - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let session = self.session; let key_expr = self.key_expr?.into_owned(); session @@ -238,11 +236,12 @@ impl SyncResolve for LivelinessTokenBuilder<'_, '_> { } #[zenoh_macros::unstable] -impl AsyncResolve for LivelinessTokenBuilder<'_, '_> { - type Future = Ready; +impl IntoFuture for LivelinessTokenBuilder<'_, '_> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -272,13 +271,12 @@ pub(crate) struct LivelinessTokenState { /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") -/// .res() /// .await /// .unwrap(); /// # } @@ -297,17 +295,16 @@ pub struct LivelinessToken<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use 
zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") -/// .res() /// .await /// .unwrap(); /// -/// liveliness.undeclare().res().await.unwrap(); +/// liveliness.undeclare().await.unwrap(); /// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] @@ -322,19 +319,20 @@ impl Resolvable for LivelinessTokenUndeclaration<'_> { } #[zenoh_macros::unstable] -impl SyncResolve for LivelinessTokenUndeclaration<'_> { - fn res_sync(mut self) -> ::To { +impl Wait for LivelinessTokenUndeclaration<'_> { + fn wait(mut self) -> ::To { self.token.alive = false; self.token.session.undeclare_liveliness(self.token.state.id) } } #[zenoh_macros::unstable] -impl<'a> AsyncResolve for LivelinessTokenUndeclaration<'a> { - type Future = Ready; +impl<'a> IntoFuture for LivelinessTokenUndeclaration<'a> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -350,17 +348,16 @@ impl<'a> LivelinessToken<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") - /// .res() /// .await /// .unwrap(); /// - /// liveliness.undeclare().res().await.unwrap(); + /// liveliness.undeclare().await.unwrap(); /// # } /// ``` #[inline] @@ -391,13 +388,12 @@ impl Drop for LivelinessToken<'_> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") /// .best_effort() -/// .res() /// .await /// .unwrap(); /// # } @@ -419,13 +415,12 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()); }) - /// .res() /// .await /// .unwrap(); /// # } @@ -460,14 +455,13 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let mut n = 0; /// let subscriber = session /// .declare_subscriber("key/expression") /// .callback_mut(move |_sample| { n += 1; }) - /// .res() /// .await /// .unwrap(); /// # } @@ -490,13 +484,12 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// ```no_run /// # #[tokio::main] /// # async fn 
main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") /// .with(flume::bounded(32)) - /// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { @@ -533,13 +526,13 @@ where } #[zenoh_macros::unstable] -impl<'a, Handler> SyncResolve for LivelinessSubscriberBuilder<'a, '_, Handler> +impl<'a, Handler> Wait for LivelinessSubscriberBuilder<'a, '_, Handler> where Handler: IntoHandler<'static, Sample> + Send, Handler::Handler: Send, { #[zenoh_macros::unstable] - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let key_expr = self.key_expr?; let session = self.session; let (callback, handler) = self.handler.into_handler(); @@ -563,16 +556,17 @@ where } #[zenoh_macros::unstable] -impl<'a, Handler> AsyncResolve for LivelinessSubscriberBuilder<'a, '_, Handler> +impl<'a, Handler> IntoFuture for LivelinessSubscriberBuilder<'a, '_, Handler> where Handler: IntoHandler<'static, Sample> + Send, Handler::Handler: Send, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; #[zenoh_macros::unstable] - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -583,13 +577,12 @@ where /// # #[tokio::main] /// # async fn main() { /// # use std::convert::TryFrom; -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let tokens = session /// .liveliness() /// .get("key/expression") -/// .res() /// .await /// .unwrap(); /// while let Ok(token) = tokens.recv_async().await { @@ -616,14 +609,13 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let queryable = session /// .liveliness() /// .get("key/expression") /// .callback(|reply| { println!("Received {:?}", reply.result()); }) - /// .res() /// .await /// .unwrap(); /// # } @@ -656,15 +648,14 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let mut n = 0; /// let queryable = session /// .liveliness() /// .get("key/expression") /// .callback_mut(move |reply| {n += 1;}) - /// .res() /// .await /// .unwrap(); /// # } @@ -686,14 +677,13 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let replies = session /// .liveliness() /// .get("key/expression") /// .with(flume::bounded(32)) - /// .res() /// .await /// .unwrap(); /// while let Ok(reply) = replies.recv_async().await { @@ -738,12 +728,12 @@ where type To = 
ZResult; } -impl SyncResolve for LivelinessGetBuilder<'_, '_, Handler> +impl Wait for LivelinessGetBuilder<'_, '_, Handler> where Handler: IntoHandler<'static, Reply> + Send, Handler::Handler: Send, { - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let (callback, receiver) = self.handler.into_handler(); self.session .query( @@ -764,14 +754,15 @@ where } } -impl AsyncResolve for LivelinessGetBuilder<'_, '_, Handler> +impl IntoFuture for LivelinessGetBuilder<'_, '_, Handler> where Handler: IntoHandler<'static, Reply> + Send, Handler::Handler: Send, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index f41c35b720..518ddc4d1b 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -25,13 +25,14 @@ use super::{ }; use crate::net::primitives::Primitives; use futures::Sink; +use std::future::IntoFuture; use std::{ convert::TryFrom, future::Ready, pin::Pin, task::{Context, Poll}, }; -use zenoh_core::{zread, AsyncResolve, Resolvable, Resolve, SyncResolve}; +use zenoh_core::{zread, Resolvable, Resolve, Wait}; use zenoh_keyexpr::keyexpr; use zenoh_protocol::{ core::CongestionControl, @@ -86,11 +87,11 @@ impl std::fmt::Debug for PublisherRef<'_> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); -/// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); -/// publisher.put("value").res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); +/// let publisher = session.declare_publisher("key/expression").await.unwrap(); +/// publisher.put("value").await.unwrap(); /// # } /// ``` /// @@ -101,11 +102,11 @@ impl std::fmt::Debug for PublisherRef<'_> { /// # #[tokio::main] /// # async fn main() { /// use futures::StreamExt; -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); -/// let mut subscriber = session.declare_subscriber("key/expression").res().await.unwrap(); -/// let publisher = session.declare_publisher("another/key/expression").res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); +/// let mut subscriber = session.declare_subscriber("key/expression").await.unwrap(); +/// let publisher = session.declare_publisher("another/key/expression").await.unwrap(); /// subscriber.stream().map(Ok).forward(publisher).await.unwrap(); /// # } /// ``` @@ -128,11 +129,10 @@ impl<'a> Publisher<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let publisher = session.declare_publisher("key/expression") - /// .res() /// .await /// .unwrap(); /// let publisher_id = publisher.id(); @@ -184,11 +184,11 @@ impl<'a> Publisher<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); - /// let publisher = 
session.declare_publisher("key/expression").res().await.unwrap().into_arc(); - /// let matching_listener = publisher.matching_listener().res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap().into_arc(); + /// let matching_listener = publisher.matching_listener().await.unwrap(); /// /// tokio::task::spawn(async move { /// while let Ok(matching_status) = matching_listener.recv_async().await { @@ -212,11 +212,11 @@ impl<'a> Publisher<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); - /// publisher.put("value").res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); + /// publisher.put("value").await.unwrap(); /// # } /// ``` #[inline] @@ -244,11 +244,11 @@ impl<'a> Publisher<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); - /// publisher.delete().res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); + /// publisher.delete().await.unwrap(); /// # } /// ``` pub fn delete(&self) -> PublisherDeleteBuilder<'_> { @@ -272,13 +272,12 @@ impl<'a> Publisher<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); /// let matching_subscribers: bool = publisher /// .matching_status() - /// .res() /// .await /// .unwrap() /// .matching_subscribers(); @@ -301,11 +300,11 @@ impl<'a> Publisher<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); - /// let matching_listener = publisher.matching_listener().res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); + /// let matching_listener = publisher.matching_listener().await.unwrap(); /// while let Ok(matching_status) = matching_listener.recv_async().await { /// if matching_status.matching_subscribers() { /// println!("Publisher has matching subscribers."); @@ -329,11 +328,11 @@ impl<'a> Publisher<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let publisher = 
session.declare_publisher("key/expression").res().await.unwrap(); - /// publisher.undeclare().res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); + /// publisher.undeclare().await.unwrap(); /// # } /// ``` pub fn undeclare(self) -> impl Resolve> + 'a { @@ -353,11 +352,11 @@ impl<'a> Publisher<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); -/// let publisher = session.declare_publisher("key/expression").res().await.unwrap().into_arc(); -/// let matching_listener = publisher.matching_listener().res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); +/// let publisher = session.declare_publisher("key/expression").await.unwrap().into_arc(); +/// let matching_listener = publisher.matching_listener().await.unwrap(); /// /// tokio::task::spawn(async move { /// while let Ok(matching_status) = matching_listener.recv_async().await { @@ -376,11 +375,11 @@ pub trait PublisherDeclarations { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap().into_arc(); - /// let matching_listener = publisher.matching_listener().res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap().into_arc(); + /// let matching_listener = publisher.matching_listener().await.unwrap(); /// /// tokio::task::spawn(async move { /// while let Ok(matching_status) = matching_listener.recv_async().await { @@ -403,11 +402,11 @@ impl PublisherDeclarations for std::sync::Arc> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap().into_arc(); - /// let matching_listener = publisher.matching_listener().res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap().into_arc(); + /// let matching_listener = publisher.matching_listener().await.unwrap(); /// /// tokio::task::spawn(async move { /// while let Ok(matching_status) = matching_listener.recv_async().await { @@ -441,11 +440,11 @@ impl<'a> Undeclarable<(), PublisherUndeclaration<'a>> for Publisher<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); -/// publisher.undeclare().res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let publisher = session.declare_publisher("key/expression").await.unwrap(); +/// publisher.undeclare().await.unwrap(); /// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] @@ -457,24 
+456,25 @@ impl Resolvable for PublisherUndeclaration<'_> { type To = ZResult<()>; } -impl SyncResolve for PublisherUndeclaration<'_> { - fn res_sync(mut self) -> ::To { +impl Wait for PublisherUndeclaration<'_> { + fn wait(mut self) -> ::To { let Publisher { session, key_expr, .. } = &self.publisher; session .undeclare_publication_intent(key_expr.clone()) - .res_sync()?; + .wait()?; self.publisher.key_expr = unsafe { keyexpr::from_str_unchecked("") }.into(); Ok(()) } } -impl AsyncResolve for PublisherUndeclaration<'_> { - type Future = Ready; +impl IntoFuture for PublisherUndeclaration<'_> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -484,7 +484,7 @@ impl Drop for Publisher<'_> { let _ = self .session .undeclare_publication_intent(self.key_expr.clone()) - .res_sync(); + .wait(); } } } @@ -726,11 +726,11 @@ impl TryFrom for Priority { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); -/// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); -/// let matching_status = publisher.matching_status().res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); +/// let publisher = session.declare_publisher("key/expression").await.unwrap(); +/// let matching_status = publisher.matching_status().await.unwrap(); /// # } /// ``` #[zenoh_macros::unstable] @@ -747,13 +747,12 @@ impl MatchingStatus { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); /// let matching_subscribers: bool = publisher /// .matching_status() - /// .res() /// .await /// .unwrap() /// .matching_subscribers(); @@ -780,10 +779,10 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); /// let matching_listener = publisher /// .matching_listener() /// .callback(|matching_status| { @@ -793,7 +792,6 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { /// println!("Publisher has NO MORE matching subscribers."); /// } /// }) - /// .res() /// .await /// .unwrap(); /// # } @@ -820,15 +818,14 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// /// let mut n = 0; - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let publisher = 
session.declare_publisher("key/expression").await.unwrap(); /// let matching_listener = publisher /// .matching_listener() /// .callback_mut(move |_matching_status| { n += 1; }) - /// .res() /// .await /// .unwrap(); /// # } @@ -851,14 +848,13 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); /// let matching_listener = publisher /// .matching_listener() /// .with(flume::bounded(32)) - /// .res() /// .await /// .unwrap(); /// while let Ok(matching_status) = matching_listener.recv_async().await { @@ -894,13 +890,13 @@ where } #[zenoh_macros::unstable] -impl<'a, Handler> SyncResolve for MatchingListenerBuilder<'a, Handler> +impl<'a, Handler> Wait for MatchingListenerBuilder<'a, Handler> where Handler: IntoHandler<'static, MatchingStatus> + Send, Handler::Handler: Send, { #[zenoh_macros::unstable] - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let (callback, receiver) = self.handler.into_handler(); self.publisher .session @@ -917,16 +913,17 @@ where } #[zenoh_macros::unstable] -impl<'a, Handler> AsyncResolve for MatchingListenerBuilder<'a, Handler> +impl<'a, Handler> IntoFuture for MatchingListenerBuilder<'a, Handler> where Handler: IntoHandler<'static, MatchingStatus> + Send, Handler::Handler: Send, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; #[zenoh_macros::unstable] - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -978,11 +975,11 @@ impl<'a> Undeclarable<(), MatchingListenerUndeclaration<'a>> for MatchingListene /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); -/// let matching_listener = publisher.matching_listener().res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let publisher = session.declare_publisher("key/expression").await.unwrap(); +/// let matching_listener = publisher.matching_listener().await.unwrap(); /// while let Ok(matching_status) = matching_listener.recv_async().await { /// if matching_status.matching_subscribers() { /// println!("Publisher has matching subscribers."); @@ -1009,12 +1006,12 @@ impl<'a, Receiver> MatchingListener<'a, Receiver> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); - /// let matching_listener = publisher.matching_listener().res().await.unwrap(); - /// matching_listener.undeclare().res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); + /// let matching_listener = publisher.matching_listener().await.unwrap(); + /// 
matching_listener.undeclare().await.unwrap(); /// # } /// ``` #[inline] @@ -1056,8 +1053,8 @@ impl Resolvable for MatchingListenerUndeclaration<'_> { } #[zenoh_macros::unstable] -impl SyncResolve for MatchingListenerUndeclaration<'_> { - fn res_sync(mut self) -> ::To { +impl Wait for MatchingListenerUndeclaration<'_> { + fn wait(mut self) -> ::To { self.subscriber.alive = false; self.subscriber .publisher @@ -1067,11 +1064,12 @@ impl SyncResolve for MatchingListenerUndeclaration<'_> { } #[zenoh_macros::unstable] -impl AsyncResolve for MatchingListenerUndeclaration<'_> { - type Future = Ready; +impl IntoFuture for MatchingListenerUndeclaration<'_> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -1091,7 +1089,7 @@ impl Drop for MatchingListenerInner<'_> { mod tests { use crate::api::{sample::SampleKind, session::SessionDeclarations}; use zenoh_config::Config; - use zenoh_core::SyncResolve; + use zenoh_core::Wait; #[test] fn priority_from() { @@ -1125,13 +1123,13 @@ mod tests { const VALUE: &str = "zenoh"; fn sample_kind_integrity_in_publication_with(kind: SampleKind) { - let session = open(Config::default()).res().unwrap(); - let sub = session.declare_subscriber(KEY_EXPR).res().unwrap(); - let pub_ = session.declare_publisher(KEY_EXPR).res().unwrap(); + let session = open(Config::default()).wait().unwrap(); + let sub = session.declare_subscriber(KEY_EXPR).wait().unwrap(); + let pub_ = session.declare_publisher(KEY_EXPR).wait().unwrap(); match kind { - SampleKind::Put => pub_.put(VALUE).res().unwrap(), - SampleKind::Delete => pub_.delete().res().unwrap(), + SampleKind::Put => pub_.put(VALUE).wait().unwrap(), + SampleKind::Delete => pub_.delete().wait().unwrap(), } let sample = sub.recv().unwrap(); @@ -1148,18 +1146,17 @@ mod tests { #[test] fn sample_kind_integrity_in_put_builder() { use crate::api::session::open; - use zenoh_core::SyncResolve; const KEY_EXPR: &str = "test/sample_kind_integrity/put_builder"; const VALUE: &str = "zenoh"; fn sample_kind_integrity_in_put_builder_with(kind: SampleKind) { - let session = open(Config::default()).res().unwrap(); - let sub = session.declare_subscriber(KEY_EXPR).res().unwrap(); + let session = open(Config::default()).wait().unwrap(); + let sub = session.declare_subscriber(KEY_EXPR).wait().unwrap(); match kind { - SampleKind::Put => session.put(KEY_EXPR, VALUE).res().unwrap(), - SampleKind::Delete => session.delete(KEY_EXPR).res().unwrap(), + SampleKind::Put => session.put(KEY_EXPR, VALUE).wait().unwrap(), + SampleKind::Delete => session.delete(KEY_EXPR).wait().unwrap(), } let sample = sub.recv().unwrap(); diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 1cb4078ee6..311402b618 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -24,8 +24,9 @@ use super::{ session::Session, value::Value, }; +use std::future::IntoFuture; use std::{collections::HashMap, future::Ready, time::Duration}; -use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; +use zenoh_core::{Resolvable, Wait}; use zenoh_keyexpr::OwnedKeyExpr; use zenoh_protocol::core::{CongestionControl, ZenohId}; use zenoh_result::ZResult; @@ -120,14 +121,13 @@ pub(crate) struct QueryState { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = 
zenoh::open(config::peer()).await.unwrap(); /// let replies = session /// .get("key/expression?value>1") /// .target(QueryTarget::All) /// .consolidation(ConsolidationMode::None) -/// .res() /// .await /// .unwrap(); /// while let Ok(reply) = replies.recv_async().await { @@ -225,13 +225,12 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let queryable = session /// .get("key/expression") /// .callback(|reply| {println!("Received {:?}", reply.result());}) - /// .res() /// .await /// .unwrap(); /// # } @@ -284,14 +283,13 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let mut n = 0; /// let queryable = session /// .get("key/expression") /// .callback_mut(move |reply| {n += 1;}) - /// .res() /// .await /// .unwrap(); /// # } @@ -313,13 +311,12 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let replies = session /// .get("key/expression") /// .with(flume::bounded(32)) - /// .res() /// .await /// .unwrap(); /// while let Ok(reply) = replies.recv_async().await { @@ -444,12 +441,12 @@ where type To = ZResult; } -impl SyncResolve for GetBuilder<'_, '_, Handler> +impl Wait for GetBuilder<'_, '_, Handler> where Handler: IntoHandler<'static, Reply> + Send, Handler::Handler: Send, { - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let (callback, receiver) = self.handler.into_handler(); self.session @@ -472,14 +469,15 @@ where } } -impl AsyncResolve for GetBuilder<'_, '_, Handler> +impl IntoFuture for GetBuilder<'_, '_, Handler> where Handler: IntoHandler<'static, Reply> + Send, Handler::Handler: Send, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index dc13468181..c83b4b6081 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -25,6 +25,7 @@ use super::{ Id, }; use crate::net::primitives::Primitives; +use std::future::IntoFuture; use std::{ fmt, future::Ready, @@ -32,7 +33,7 @@ use std::{ sync::Arc, }; use uhlc::Timestamp; -use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; +use zenoh_core::{Resolvable, Resolve, Wait}; use zenoh_protocol::{ core::{CongestionControl, EntityId, WireExpr, ZenohId}, network::{response, Mapping, RequestId, Response, ResponseFinal}, @@ -265,17 +266,18 @@ impl Resolvable for ReplySample<'_> { type To = ZResult<()>; } -impl SyncResolve for ReplySample<'_> { - fn res_sync(self) -> ::To { +impl Wait for ReplySample<'_> { + fn wait(self) -> ::To { self.query._reply_sample(self.sample) } } -impl AsyncResolve for ReplySample<'_> { - type Future = Ready; +impl IntoFuture for ReplySample<'_> { + 
type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -383,8 +385,8 @@ impl Resolvable for ReplyBuilder<'_, '_, T> { type To = ZResult<()>; } -impl SyncResolve for ReplyBuilder<'_, '_, ReplyBuilderPut> { - fn res_sync(self) -> ::To { +impl Wait for ReplyBuilder<'_, '_, ReplyBuilderPut> { + fn wait(self) -> ::To { let key_expr = self.key_expr?.into_owned(); let sample = SampleBuilder::put(key_expr, self.kind.payload) .encoding(self.kind.encoding) @@ -398,8 +400,8 @@ impl SyncResolve for ReplyBuilder<'_, '_, ReplyBuilderPut> { } } -impl SyncResolve for ReplyBuilder<'_, '_, ReplyBuilderDelete> { - fn res_sync(self) -> ::To { +impl Wait for ReplyBuilder<'_, '_, ReplyBuilderDelete> { + fn wait(self) -> ::To { let key_expr = self.key_expr?.into_owned(); let sample = SampleBuilder::delete(key_expr) .timestamp(self.timestamp) @@ -472,19 +474,21 @@ impl Query { } } -impl AsyncResolve for ReplyBuilder<'_, '_, ReplyBuilderPut> { - type Future = Ready; +impl IntoFuture for ReplyBuilder<'_, '_, ReplyBuilderPut> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } -impl AsyncResolve for ReplyBuilder<'_, '_, ReplyBuilderDelete> { - type Future = Ready; +impl IntoFuture for ReplyBuilder<'_, '_, ReplyBuilderDelete> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -521,8 +525,8 @@ impl<'a> Resolvable for ReplyErrBuilder<'a> { type To = ZResult<()>; } -impl SyncResolve for ReplyErrBuilder<'_> { - fn res_sync(self) -> ::To { +impl Wait for ReplyErrBuilder<'_> { + fn wait(self) -> ::To { self.query.inner.primitives.send_response(Response { rid: self.query.inner.qid, wire_expr: WireExpr { @@ -549,11 +553,12 @@ impl SyncResolve for ReplyErrBuilder<'_> { } } -impl<'a> AsyncResolve for ReplyErrBuilder<'a> { - type Future = Ready; +impl<'a> IntoFuture for ReplyErrBuilder<'a> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -589,14 +594,13 @@ impl fmt::Debug for QueryableState { /// # #[tokio::main] /// # async fn main() { /// use futures::prelude::*; -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let queryable = session.declare_queryable("key/expression").res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let queryable = session.declare_queryable("key/expression").await.unwrap(); /// while let Ok(query) = queryable.recv_async().await { /// println!(">> Handling query '{}'", query.selector()); /// query.reply(KeyExpr::try_from("key/expression").unwrap(), "value") -/// .res() /// .await /// .unwrap(); /// } @@ -621,11 +625,11 @@ impl<'a> Undeclarable<(), QueryableUndeclaration<'a>> for CallbackQueryable<'a> /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let queryable = 
session.declare_queryable("key/expression").res().await.unwrap(); -/// queryable.undeclare().res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let queryable = session.declare_queryable("key/expression").await.unwrap(); +/// queryable.undeclare().await.unwrap(); /// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] @@ -637,8 +641,8 @@ impl Resolvable for QueryableUndeclaration<'_> { type To = ZResult<()>; } -impl SyncResolve for QueryableUndeclaration<'_> { - fn res_sync(mut self) -> ::To { +impl Wait for QueryableUndeclaration<'_> { + fn wait(mut self) -> ::To { self.queryable.alive = false; self.queryable .session @@ -646,11 +650,12 @@ impl SyncResolve for QueryableUndeclaration<'_> { } } -impl<'a> AsyncResolve for QueryableUndeclaration<'a> { - type Future = Ready; +impl<'a> IntoFuture for QueryableUndeclaration<'a> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -668,10 +673,10 @@ impl Drop for CallbackQueryable<'_> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let queryable = session.declare_queryable("key/expression").res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); +/// let queryable = session.declare_queryable("key/expression").await.unwrap(); /// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] @@ -691,13 +696,12 @@ impl<'a, 'b> QueryableBuilder<'a, 'b, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let queryable = session /// .declare_queryable("key/expression") /// .callback(|query| {println!(">> Handling query '{}'", query.selector());}) - /// .res() /// .await /// .unwrap(); /// # } @@ -732,14 +736,13 @@ impl<'a, 'b> QueryableBuilder<'a, 'b, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let mut n = 0; /// let queryable = session /// .declare_queryable("key/expression") /// .callback_mut(move |query| {n += 1;}) - /// .res() /// .await /// .unwrap(); /// # } @@ -761,13 +764,12 @@ impl<'a, 'b> QueryableBuilder<'a, 'b, DefaultHandler> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let queryable = session /// .declare_queryable("key/expression") /// .with(flume::bounded(32)) - /// .res() /// .await /// .unwrap(); /// while let Ok(query) = queryable.recv_async().await { @@ -827,19 +829,17 @@ impl<'a, 'b, Handler> QueryableBuilder<'a, 'b, Handler> { /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use 
zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let queryable = session /// .declare_queryable("key/expression") /// .with(flume::bounded(32)) -/// .res() /// .await /// .unwrap(); /// while let Ok(query) = queryable.recv_async().await { /// println!(">> Handling query '{}'", query.selector()); /// query.reply(KeyExpr::try_from("key/expression").unwrap(), "value") -/// .res() /// .await /// .unwrap(); /// } @@ -859,11 +859,10 @@ impl<'a, Handler> Queryable<'a, Handler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let queryable = session.declare_queryable("key/expression") - /// .res() /// .await /// .unwrap(); /// let queryable_id = queryable.id(); @@ -925,12 +924,12 @@ where type To = ZResult>; } -impl<'a, Handler> SyncResolve for QueryableBuilder<'a, '_, Handler> +impl<'a, Handler> Wait for QueryableBuilder<'a, '_, Handler> where Handler: IntoHandler<'static, Query> + Send, Handler::Handler: Send, { - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let session = self.session; let (callback, receiver) = self.handler.into_handler(); session @@ -951,14 +950,15 @@ where } } -impl<'a, Handler> AsyncResolve for QueryableBuilder<'a, '_, Handler> +impl<'a, Handler> IntoFuture for QueryableBuilder<'a, '_, Handler> where Handler: IntoHandler<'static, Query> + Send, Handler::Handler: Send, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } diff --git a/zenoh/src/api/scouting.rs b/zenoh/src/api/scouting.rs index c4e411dec9..8e7853a411 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -13,10 +13,11 @@ // use crate::api::handlers::{locked, Callback, DefaultHandler, IntoHandler}; use crate::net::runtime::{orchestrator::Loop, Runtime}; +use std::future::IntoFuture; use std::time::Duration; use std::{fmt, future::Ready, net::SocketAddr, ops::Deref}; use tokio::net::UdpSocket; -use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; +use zenoh_core::{Resolvable, Wait}; use zenoh_protocol::{core::WhatAmIMatcher, scouting::Hello}; use zenoh_result::ZResult; use zenoh_task::TerminatableTask; @@ -27,10 +28,9 @@ use zenoh_task::TerminatableTask; /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// /// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) -/// .res() /// .await /// .unwrap(); /// while let Ok(hello) = receiver.recv_async().await { @@ -53,11 +53,10 @@ impl ScoutBuilder { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) /// .callback(|hello| { println!("{}", hello); }) - /// .res() /// .await /// .unwrap(); /// # } @@ -88,12 +87,11 @@ impl ScoutBuilder { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// /// let mut n = 0; /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, 
config::default()) /// .callback_mut(move |_hello| { n += 1; }) - /// .res() /// .await /// .unwrap(); /// # } @@ -115,11 +113,10 @@ impl ScoutBuilder { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// /// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) /// .with(flume::bounded(32)) - /// .res() /// .await /// .unwrap(); /// while let Ok(hello) = receiver.recv_async().await { @@ -153,26 +150,27 @@ where type To = ZResult>; } -impl SyncResolve for ScoutBuilder +impl Wait for ScoutBuilder where Handler: IntoHandler<'static, Hello> + Send, Handler::Handler: Send, { - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let (callback, receiver) = self.handler.into_handler(); _scout(self.what, self.config?, callback).map(|scout| Scout { scout, receiver }) } } -impl AsyncResolve for ScoutBuilder +impl IntoFuture for ScoutBuilder where Handler: IntoHandler<'static, Hello> + Send, Handler::Handler: Send, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -182,11 +180,10 @@ where /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) /// .callback(|hello| { println!("{}", hello); }) -/// .res() /// .await /// .unwrap(); /// # } @@ -203,11 +200,10 @@ impl ScoutInner { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) /// .callback(|hello| { println!("{}", hello); }) - /// .res() /// .await /// .unwrap(); /// scout.stop(); @@ -239,11 +235,10 @@ impl fmt::Debug for ScoutInner { /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// /// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) /// .with(flume::bounded(32)) -/// .res() /// .await /// .unwrap(); /// while let Ok(hello) = receiver.recv_async().await { @@ -273,11 +268,10 @@ impl Scout { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// /// let scout = zenoh::scout(WhatAmI::Router, config::default()) /// .with(flume::bounded(32)) - /// .res() /// .await /// .unwrap(); /// let _router = scout.recv_async().await; @@ -350,11 +344,10 @@ fn _scout( /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// use zenoh::scouting::WhatAmI; /// /// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default()) -/// .res() /// .await /// .unwrap(); /// while let Ok(hello) = receiver.recv_async().await { diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 01fc345c3b..dea322419c 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -32,6 +32,7 @@ use super::{ Id, }; use crate::net::{primitives::Primitives, routing::dispatcher::face::Face, runtime::Runtime}; +use std::future::IntoFuture; use std::{ collections::HashMap, convert::{TryFrom, TryInto}, @@ -49,9 +50,7 @@ use uhlc::HLC; use zenoh_buffers::ZBuf; use zenoh_collections::SingleOrVec; use 
zenoh_config::{unwrap_or_default, Config, Notifier}; -use zenoh_core::{ - zconfigurable, zread, Resolvable, Resolve, ResolveClosure, ResolveFuture, SyncResolve, -}; +use zenoh_core::{zconfigurable, zread, Resolvable, Resolve, ResolveClosure, ResolveFuture, Wait}; #[cfg(feature = "unstable")] use zenoh_protocol::network::{declare::SubscriberId, ext}; use zenoh_protocol::{ @@ -78,7 +77,6 @@ use zenoh_result::ZResult; #[cfg(all(feature = "unstable", feature = "shared-memory"))] use zenoh_shm::api::client_storage::SharedMemoryClientStorage; use zenoh_task::TaskController; -use zenoh_util::core::AsyncResolve; #[cfg(feature = "unstable")] use super::{ @@ -452,11 +450,10 @@ impl Session { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); /// let subscriber = session.declare_subscriber("key/expression") - /// .res() /// .await /// .unwrap(); /// tokio::task::spawn(async move { @@ -486,10 +483,10 @@ impl Session { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = Session::leak(zenoh::open(config::peer()).res().await.unwrap()); - /// let subscriber = session.declare_subscriber("key/expression").res().await.unwrap(); + /// let session = Session::leak(zenoh::open(config::peer()).await.unwrap()); + /// let subscriber = session.declare_subscriber("key/expression").await.unwrap(); /// tokio::task::spawn(async move { /// while let Ok(sample) = subscriber.recv_async().await { /// println!("Received: {:?}", sample); @@ -504,7 +501,7 @@ impl Session { /// Returns the identifier of the current session. `zid()` is a convenient shortcut. /// See [`Session::info()`](`Session::info()`) and [`SessionInfo::zid()`](`SessionInfo::zid()`) for more details. 
pub fn zid(&self) -> ZenohId { - self.info().zid().res_sync() + self.info().zid().wait() } pub fn hlc(&self) -> Option<&HLC> { @@ -520,10 +517,10 @@ impl Session { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// session.close().res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// session.close().await.unwrap(); /// # } /// ``` pub fn close(mut self) -> impl Resolve> { @@ -563,9 +560,9 @@ impl Session { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let peers = session.config().get("connect/endpoints").unwrap(); /// # } /// ``` @@ -574,9 +571,9 @@ impl Session { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let _ = session.config().insert_json5("connect/endpoints", r#"["tcp/127.0.0.1/7447"]"#); /// # } /// ``` @@ -635,10 +632,10 @@ impl Session { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let key_expr = session.declare_keyexpr("key/expression").res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let key_expr = session.declare_keyexpr("key/expression").await.unwrap(); /// # } /// ``` pub fn declare_keyexpr<'a, 'b: 'a, TryIntoKeyExpr>( @@ -661,7 +658,7 @@ impl Session { ResolveClosure::new(move || { let key_expr: KeyExpr = key_expr?; let prefix_len = key_expr.len() as u32; - let expr_id = self.declare_prefix(key_expr.as_str()).res_sync(); + let expr_id = self.declare_prefix(key_expr.as_str()).wait(); let key_expr = match key_expr.0 { KeyExprInner::Borrowed(key_expr) | KeyExprInner::BorrowedWire { key_expr, .. 
} => { KeyExpr(KeyExprInner::BorrowedWire { @@ -697,13 +694,12 @@ impl Session { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// session /// .put("key/expression", "payload") /// .encoding(Encoding::TEXT_PLAIN) - /// .res() /// .await /// .unwrap(); /// # } @@ -743,10 +739,10 @@ impl Session { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// session.delete("key/expression").res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// session.delete("key/expression").await.unwrap(); /// # } /// ``` #[inline] @@ -781,10 +777,10 @@ impl Session { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let replies = session.get("key/expression").res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); + /// let replies = session.get("key/expression").await.unwrap(); /// while let Ok(reply) = replies.recv_async().await { /// println!(">> Received {:?}", reply.result()); /// } @@ -858,7 +854,6 @@ impl Session { aggregated_subscribers, aggregated_publishers, ) - .res_async() .await; session.owns_runtime = true; runtime.start().await?; @@ -1091,14 +1086,14 @@ impl Session { // match key_expr.as_str().find('*') { // Some(0) => key_expr.to_wire(self), // Some(pos) => { - // let expr_id = self.declare_prefix(&key_expr.as_str()[..pos]).res_sync(); + // let expr_id = self.declare_prefix(&key_expr.as_str()[..pos]).wait(); // WireExpr { // scope: expr_id, // suffix: std::borrow::Cow::Borrowed(&key_expr.as_str()[pos..]), // } // } // None => { - // let expr_id = self.declare_prefix(key_expr.as_str()).res_sync(); + // let expr_id = self.declare_prefix(key_expr.as_str()).wait(); // WireExpr { // scope: expr_id, // suffix: std::borrow::Cow::Borrowed(""), @@ -1840,11 +1835,10 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); /// let subscriber = session.declare_subscriber("key/expression") - /// .res() /// .await /// .unwrap(); /// tokio::task::spawn(async move { @@ -1882,11 +1876,10 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); /// let queryable = session.declare_queryable("key/expression") - /// .res() /// .await /// .unwrap(); /// tokio::task::spawn(async move { @@ -1894,7 +1887,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// query.reply( /// KeyExpr::try_from("key/expression").unwrap(), /// "value", - /// ).res().await.unwrap(); + /// ).await.unwrap(); /// } /// }).await; /// # } @@ -1926,14 +1919,13 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// 
``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression") - /// .res() /// .await /// .unwrap(); - /// publisher.put("value").res().await.unwrap(); + /// publisher.put("value").await.unwrap(); /// # } /// ``` fn declare_publisher<'b, TryIntoKeyExpr>( @@ -1960,13 +1952,12 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") - /// .res() /// .await /// .unwrap(); /// # } @@ -2424,7 +2415,7 @@ impl Primitives for Session { impl Drop for Session { fn drop(&mut self) { if self.alive { - let _ = self.clone().close().res_sync(); + let _ = self.clone().close().wait(); } } } @@ -2448,11 +2439,10 @@ impl fmt::Debug for Session { /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); +/// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); /// let subscriber = session.declare_subscriber("key/expression") -/// .res() /// .await /// .unwrap(); /// tokio::task::spawn(async move { @@ -2473,11 +2463,10 @@ pub trait SessionDeclarations<'s, 'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); /// let subscriber = session.declare_subscriber("key/expression") - /// .res() /// .await /// .unwrap(); /// tokio::task::spawn(async move { @@ -2506,11 +2495,10 @@ pub trait SessionDeclarations<'s, 'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); /// let queryable = session.declare_queryable("key/expression") - /// .res() /// .await /// .unwrap(); /// tokio::task::spawn(async move { @@ -2518,7 +2506,7 @@ pub trait SessionDeclarations<'s, 'a> { /// query.reply( /// KeyExpr::try_from("key/expression").unwrap(), /// "value", - /// ).res().await.unwrap(); + /// ).await.unwrap(); /// } /// }).await; /// # } @@ -2541,14 +2529,13 @@ pub trait SessionDeclarations<'s, 'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression") - /// .res() /// .await /// .unwrap(); - /// publisher.put("value").res().await.unwrap(); + /// publisher.put("value").await.unwrap(); /// # } /// ``` fn declare_publisher<'b, TryIntoKeyExpr>( @@ -2565,13 +2552,12 @@ pub trait 
SessionDeclarations<'s, 'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = zenoh::open(config::peer()).await.unwrap().into_arc(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") - /// .res() /// .await /// .unwrap(); /// # } @@ -2584,9 +2570,9 @@ pub trait SessionDeclarations<'s, 'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let info = session.info(); /// # } /// ``` @@ -2639,9 +2625,9 @@ impl crate::net::primitives::EPrimitives for Session { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// # } /// ``` /// @@ -2649,13 +2635,13 @@ impl crate::net::primitives::EPrimitives for Session { /// # #[tokio::main] /// # async fn main() { /// use std::str::FromStr; -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// /// let mut config = config::peer(); /// config.set_id(ZenohId::from_str("221b72df20924c15b8794c6bdb471150").unwrap()); /// config.connect.endpoints.extend("tcp/10.10.10.10:7447,tcp/11.11.11.11:7447".split(',').map(|s|s.parse().unwrap())); /// -/// let session = zenoh::open(config).res().await.unwrap(); +/// let session = zenoh::open(config).await.unwrap(); /// # } /// ``` pub fn open(config: TryIntoConfig) -> OpenBuilder @@ -2676,9 +2662,9 @@ where /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] @@ -2712,12 +2698,12 @@ where type To = ZResult; } -impl SyncResolve for OpenBuilder +impl Wait for OpenBuilder where TryIntoConfig: std::convert::TryInto + Send + 'static, >::Error: std::fmt::Debug, { - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let config: crate::config::Config = self .config .try_into() @@ -2727,19 +2713,20 @@ where #[cfg(all(feature = "unstable", feature = "shared-memory"))] self.shm_clients, ) - .res_sync() + .wait() } } -impl AsyncResolve for OpenBuilder +impl IntoFuture for OpenBuilder where TryIntoConfig: std::convert::TryInto + Send + 'static, >::Error: std::fmt::Debug, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -2786,22 +2773,23 @@ impl Resolvable for InitBuilder { } #[zenoh_macros::unstable] -impl SyncResolve for InitBuilder { - fn res_sync(self) -> ::To { +impl Wait for InitBuilder { + fn wait(self) -> ::To { Ok(Session::init( self.runtime, self.aggregated_subscribers, self.aggregated_publishers, ) - .res_sync()) + .wait()) } } #[zenoh_macros::unstable] -impl AsyncResolve for InitBuilder { - type Future = Ready; +impl IntoFuture for InitBuilder { + type Output = 
::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index 4ac035d736..0c4e21b547 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -19,13 +19,14 @@ use super::{ session::{SessionRef, Undeclarable}, Id, }; +use std::future::IntoFuture; use std::{ fmt, future::Ready, ops::{Deref, DerefMut}, sync::Arc, }; -use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; +use zenoh_core::{Resolvable, Wait}; use zenoh_protocol::{core::Reliability, network::declare::subscriber::ext::SubscriberInfo}; use zenoh_result::ZResult; @@ -63,13 +64,12 @@ impl fmt::Debug for SubscriberState { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()) }) -/// .res() /// .await /// .unwrap(); /// # } @@ -91,17 +91,16 @@ impl<'a> SubscriberInner<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// # fn data_handler(_sample: Sample) { }; /// let subscriber = session /// .declare_subscriber("key/expression") /// .callback(data_handler) - /// .res() /// .await /// .unwrap(); - /// subscriber.undeclare().res().await.unwrap(); + /// subscriber.undeclare().await.unwrap(); /// # } /// ``` #[inline] @@ -122,15 +121,14 @@ impl<'a> Undeclarable<(), SubscriberUndeclaration<'a>> for SubscriberInner<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") -/// .res() /// .await /// .unwrap(); -/// subscriber.undeclare().res().await.unwrap(); +/// subscriber.undeclare().await.unwrap(); /// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] @@ -142,8 +140,8 @@ impl Resolvable for SubscriberUndeclaration<'_> { type To = ZResult<()>; } -impl SyncResolve for SubscriberUndeclaration<'_> { - fn res_sync(mut self) -> ::To { +impl Wait for SubscriberUndeclaration<'_> { + fn wait(mut self) -> ::To { self.subscriber.alive = false; self.subscriber .session @@ -151,11 +149,12 @@ impl SyncResolve for SubscriberUndeclaration<'_> { } } -impl AsyncResolve for SubscriberUndeclaration<'_> { - type Future = Ready; +impl IntoFuture for SubscriberUndeclaration<'_> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -173,13 +172,12 @@ impl Drop for SubscriberInner<'_> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = 
zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") /// .best_effort() -/// .res() /// .await /// .unwrap(); /// # } @@ -220,13 +218,12 @@ impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()); }) - /// .res() /// .await /// .unwrap(); /// # } @@ -263,14 +260,13 @@ impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let mut n = 0; /// let subscriber = session /// .declare_subscriber("key/expression") /// .callback_mut(move |_sample| { n += 1; }) - /// .res() /// .await /// .unwrap(); /// # } @@ -292,13 +288,12 @@ impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") /// .with(flume::bounded(32)) - /// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { @@ -369,12 +364,12 @@ where type To = ZResult>; } -impl<'a, Handler> SyncResolve for SubscriberBuilder<'a, '_, Handler> +impl<'a, Handler> Wait for SubscriberBuilder<'a, '_, Handler> where Handler: IntoHandler<'static, Sample> + Send, Handler::Handler: Send, { - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let key_expr = self.key_expr?; let session = self.session; let (callback, receiver) = self.handler.into_handler(); @@ -399,15 +394,16 @@ where } } -impl<'a, Handler> AsyncResolve for SubscriberBuilder<'a, '_, Handler> +impl<'a, Handler> IntoFuture for SubscriberBuilder<'a, '_, Handler> where Handler: IntoHandler<'static, Sample> + Send, Handler::Handler: Send, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -424,13 +420,12 @@ where /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") /// .with(flume::bounded(32)) -/// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { @@ -452,11 +447,10 @@ impl<'a, Handler> Subscriber<'a, Handler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = 
zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session.declare_subscriber("key/expression") - /// .res() /// .await /// .unwrap(); /// let subscriber_id = subscriber.id(); @@ -498,14 +492,13 @@ impl<'a, Handler> Subscriber<'a, Handler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(config::peer()).await.unwrap(); /// let subscriber = session.declare_subscriber("key/expression") - /// .res() /// .await /// .unwrap(); - /// subscriber.undeclare().res().await.unwrap(); + /// subscriber.undeclare().await.unwrap(); /// # } /// ``` #[inline] diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 3c011e2439..6f679407c8 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -32,13 +32,13 @@ //! ### Publishing Data //! The example below shows how to produce a value for a key expression. //! ``` -//! use zenoh::prelude::r#async::*; +//! use zenoh::prelude::*; //! //! #[tokio::main] //! async fn main() { -//! let session = zenoh::open(config::default()).res().await.unwrap(); -//! session.put("key/expression", "value").res().await.unwrap(); -//! session.close().res().await.unwrap(); +//! let session = zenoh::open(config::default()).await.unwrap(); +//! session.put("key/expression", "value").await.unwrap(); +//! session.close().await.unwrap(); //! } //! ``` //! @@ -46,12 +46,12 @@ //! The example below shows how to consume values for a key expresison. //! ```no_run //! use futures::prelude::*; -//! use zenoh::prelude::r#async::*; +//! use zenoh::prelude::*; //! //! #[tokio::main] //! async fn main() { -//! let session = zenoh::open(config::default()).res().await.unwrap(); -//! let subscriber = session.declare_subscriber("key/expression").res().await.unwrap(); +//! let session = zenoh::open(config::default()).await.unwrap(); +//! let subscriber = session.declare_subscriber("key/expression").await.unwrap(); //! while let Ok(sample) = subscriber.recv_async().await { //! println!("Received: {:?}", sample); //! }; @@ -63,12 +63,12 @@ //! resources whose key match the given *key expression*. //! ``` //! use futures::prelude::*; -//! use zenoh::prelude::r#async::*; +//! use zenoh::prelude::*; //! //! #[tokio::main] //! async fn main() { -//! let session = zenoh::open(config::default()).res().await.unwrap(); -//! let replies = session.get("key/expression").res().await.unwrap(); +//! let session = zenoh::open(config::default()).await.unwrap(); +//! let replies = session.get("key/expression").await.unwrap(); //! while let Ok(reply) = replies.recv_async().await { //! println!(">> Received {:?}", reply.result()); //! } @@ -117,10 +117,13 @@ pub mod prelude; /// Zenoh core types pub mod core { + #[allow(deprecated)] pub use zenoh_core::AsyncResolve; pub use zenoh_core::Resolvable; pub use zenoh_core::Resolve; + #[allow(deprecated)] pub use zenoh_core::SyncResolve; + pub use zenoh_core::Wait; /// A zenoh error. pub use zenoh_result::Error; /// A zenoh result. 
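
Editorial note (not part of the patch): the hunks above and below all apply the same mechanical change — builders stop being resolved through `SyncResolve::res_sync()` / `AsyncResolve::res_async()` and instead implement `Wait` (blocking `wait()`) plus the standard `std::future::IntoFuture`, so a builder can be `.await`ed directly. The sketch below illustrates that pattern in isolation; `PutBuilder` and the trait definitions are simplified stand-ins rather than Zenoh's real types, and a `tokio` dependency is assumed only for the async `main`.

```
use std::future::{ready, IntoFuture, Ready};

// Simplified stand-ins for zenoh_core::{Resolvable, Wait}.
trait Resolvable {
    type To;
}
trait Wait: Resolvable {
    fn wait(self) -> Self::To;
}

// A toy builder playing the role of Zenoh's publication/query builders.
struct PutBuilder {
    key_expr: String,
    payload: String,
}

impl Resolvable for PutBuilder {
    type To = Result<(), String>;
}

impl Wait for PutBuilder {
    // Blocking resolution: the replacement for the old `res_sync()` / `res()`.
    fn wait(self) -> Self::To {
        println!("put {} -> {}", self.key_expr, self.payload);
        Ok(())
    }
}

impl IntoFuture for PutBuilder {
    type Output = <Self as Resolvable>::To;
    type IntoFuture = Ready<<Self as Resolvable>::To>;

    // Async resolution: resolve eagerly and wrap the result in an already-completed
    // future, so `builder.await` replaces the old `builder.res_async().await`.
    fn into_future(self) -> Self::IntoFuture {
        ready(self.wait())
    }
}

#[tokio::main]
async fn main() {
    let blocking = PutBuilder { key_expr: "key/expression".into(), payload: "sync".into() };
    blocking.wait().unwrap(); // synchronous style

    let awaited = PutBuilder { key_expr: "key/expression".into(), payload: "async".into() };
    awaited.await.unwrap(); // `.await` the builder directly, no `.res()` step
}
```

This is why the diffs can simply delete `.res()` / `.res_async()` from every doc example and test: once `IntoFuture` is implemented, `.await` on the builder performs the resolution itself.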
diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index ea084c453b..9ea54b8d88 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -31,7 +31,7 @@ use std::sync::Mutex; use tracing::{error, trace}; use zenoh_buffers::buffer::SplitBuffer; use zenoh_config::{unwrap_or_default, ConfigValidator, ValidatedMap, WhatAmI}; -use zenoh_core::SyncResolve; +use zenoh_core::Wait; #[cfg(all(feature = "unstable", feature = "plugins"))] use zenoh_plugin_trait::{PluginControl, PluginStatus}; #[cfg(all(feature = "unstable", feature = "plugins"))] @@ -630,7 +630,7 @@ fn local_data(context: &AdminContext, query: Query) { if let Err(e) = query .reply(reply_key, payload) .encoding(Encoding::APPLICATION_JSON) - .res_sync() + .wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } @@ -662,7 +662,7 @@ zenoh_build{{version="{}"}} 1 .openmetrics_text(), ); - if let Err(e) = query.reply(reply_key, metrics).res() { + if let Err(e) = query.reply(reply_key, metrics).wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -679,7 +679,7 @@ fn routers_linkstate_data(context: &AdminContext, query: Query) { if let Err(e) = query .reply(reply_key, tables.hat_code.info(&tables, WhatAmI::Router)) - .res() + .wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } @@ -697,7 +697,7 @@ fn peers_linkstate_data(context: &AdminContext, query: Query) { if let Err(e) = query .reply(reply_key, tables.hat_code.info(&tables, WhatAmI::Peer)) - .res() + .wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } @@ -719,7 +719,7 @@ fn subscribers_data(context: &AdminContext, query: Query) { if let Err(e) = query .reply(key, payload) .encoding(Encoding::APPLICATION_JSON) - .res_sync() + .wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } @@ -743,7 +743,7 @@ fn queryables_data(context: &AdminContext, query: Query) { if let Err(e) = query .reply(key, payload) .encoding(Encoding::APPLICATION_JSON) - .res_sync() + .wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } @@ -768,7 +768,7 @@ fn plugins_data(context: &AdminContext, query: Query) { let status = serde_json::to_value(status).unwrap(); match ZBytes::try_from(status) { Ok(zbuf) => { - if let Err(e) = query.reply(key, zbuf).res_sync() { + if let Err(e) = query.reply(key, zbuf).wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -793,7 +793,7 @@ fn plugins_status(context: &AdminContext, query: Query) { with_extended_string(plugin_key, &["/__path__"], |plugin_path_key| { if let Ok(key_expr) = KeyExpr::try_from(plugin_path_key.clone()) { if query.key_expr().intersects(&key_expr) { - if let Err(e) = query.reply(key_expr, plugin.path()).res() { + if let Err(e) = query.reply(key_expr, plugin.path()).wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -817,7 +817,7 @@ fn plugins_status(context: &AdminContext, query: Query) { if let Ok(key_expr) = KeyExpr::try_from(response.key) { match ZBytes::try_from(response.value) { Ok(zbuf) => { - if let Err(e) = query.reply(key_expr, zbuf).res_sync() { + if let Err(e) = query.reply(key_expr, zbuf).wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } }, diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 17286ddeea..ac466ae50b 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -18,22 +18,11 @@ //! almost always want to import its entire contents, but unlike the standard //! 
library's prelude you'll have to do so manually. //! -//! There are three variants of the prelude: full, sync and async. The sync one excludes the [`AsyncResolve`](crate::core::AsyncResolve) trait and the async one excludes the [`SyncResolve`](crate::core::SyncResolve) trait. -//! When specific sync or async prelude is included, the `res()` function of buildes works synchronously or asynchronously, respectively. -//! -//! If root prelude is included, the `res_sync()` or `res_async()` function of builders should be called explicitly. -//! //! Examples: //! //! ``` //!use zenoh::prelude::*; //! ``` -//! ``` -//!use zenoh::prelude::sync::*; -//! ``` -//! ``` -//!use zenoh::prelude::r#async::*; -//! ``` // Reexport API in flat namespace pub(crate) mod flat { @@ -81,20 +70,27 @@ pub(crate) mod mods { pub use crate::value; } +#[allow(deprecated)] pub use crate::core::AsyncResolve; +#[allow(deprecated)] pub use crate::core::SyncResolve; +pub use crate::core::Wait; pub use flat::*; pub use mods::*; /// Prelude to import when using Zenoh's sync API. +#[deprecated = "use `zenoh::prelude` instead"] pub mod sync { pub use super::flat::*; pub use super::mods::*; + #[allow(deprecated)] pub use crate::core::SyncResolve; } /// Prelude to import when using Zenoh's async API. +#[deprecated = "use `zenoh::prelude` instead"] pub mod r#async { pub use super::flat::*; pub use super::mods::*; + #[allow(deprecated)] pub use crate::core::AsyncResolve; } diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index dd1aa1271d..5f3c482581 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -16,7 +16,7 @@ mod test { use std::sync::{Arc, Mutex}; use std::time::Duration; use tokio::runtime::Handle; - use zenoh::prelude::r#async::*; + use zenoh::prelude::*; use zenoh_core::{zlock, ztimeout}; const TIMEOUT: Duration = Duration::from_secs(60); @@ -46,22 +46,22 @@ mod test { async fn close_router_session(s: Session) { println!("Closing router session"); - ztimeout!(s.close().res_async()).unwrap(); + ztimeout!(s.close()).unwrap(); } async fn get_client_sessions() -> (Session, Session) { println!("Opening client sessions"); let config = config::client(["tcp/127.0.0.1:7447".parse::().unwrap()]); - let s01 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let s01 = ztimeout!(zenoh::open(config)).unwrap(); let config = config::client(["tcp/127.0.0.1:7447".parse::().unwrap()]); - let s02 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let s02 = ztimeout!(zenoh::open(config)).unwrap(); (s01, s02) } async fn close_sessions(s01: Session, s02: Session) { println!("Closing client sessions"); - ztimeout!(s01.close().res_async()).unwrap(); - ztimeout!(s02.close().res_async()).unwrap(); + ztimeout!(s01.close()).unwrap(); + ztimeout!(s02.close()).unwrap(); } async fn test_pub_sub_deny() { @@ -82,15 +82,11 @@ mod test { .unwrap(); println!("Opening router session"); - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + let session = ztimeout!(zenoh::open(config_router)).unwrap(); let (sub_session, pub_session) = get_client_sessions().await; { - let publisher = pub_session - .declare_publisher(KEY_EXPR) - .res_async() - .await - .unwrap(); + let publisher = pub_session.declare_publisher(KEY_EXPR).await.unwrap(); let received_value = Arc::new(Mutex::new(String::new())); let temp_recv_value = received_value.clone(); let subscriber = sub_session @@ -99,15 +95,14 @@ mod test { let mut temp_value = zlock!(temp_recv_value); *temp_value = sample.payload().deserialize::().unwrap(); }) - .res_async() 
.await .unwrap(); tokio::time::sleep(SLEEP).await; - publisher.put(VALUE).res_async().await.unwrap(); + publisher.put(VALUE).await.unwrap(); tokio::time::sleep(SLEEP).await; assert_ne!(*zlock!(received_value), VALUE); - ztimeout!(subscriber.undeclare().res_async()).unwrap(); + ztimeout!(subscriber.undeclare()).unwrap(); } close_sessions(sub_session, pub_session).await; close_router_session(session).await; @@ -132,28 +127,28 @@ mod test { .unwrap(); println!("Opening router session"); - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + let session = ztimeout!(zenoh::open(config_router)).unwrap(); let (sub_session, pub_session) = get_client_sessions().await; { - let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR).res_async()).unwrap(); + let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); let received_value = Arc::new(Mutex::new(String::new())); let temp_recv_value = received_value.clone(); - let subscriber = ztimeout!(sub_session - .declare_subscriber(KEY_EXPR) - .callback(move |sample| { - let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.payload().deserialize::().unwrap(); - }) - .res_async()) - .unwrap(); + let subscriber = + ztimeout!(sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + })) + .unwrap(); tokio::time::sleep(SLEEP).await; - ztimeout!(publisher.put(VALUE).res_async()).unwrap(); + ztimeout!(publisher.put(VALUE)).unwrap(); tokio::time::sleep(SLEEP).await; assert_eq!(*zlock!(received_value), VALUE); - ztimeout!(subscriber.undeclare().res_async()).unwrap(); + ztimeout!(subscriber.undeclare()).unwrap(); } close_sessions(sub_session, pub_session).await; @@ -193,28 +188,28 @@ mod test { .unwrap(); println!("Opening router session"); - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + let session = ztimeout!(zenoh::open(config_router)).unwrap(); let (sub_session, pub_session) = get_client_sessions().await; { - let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR).res_async()).unwrap(); + let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); let received_value = Arc::new(Mutex::new(String::new())); let temp_recv_value = received_value.clone(); - let subscriber = ztimeout!(sub_session - .declare_subscriber(KEY_EXPR) - .callback(move |sample| { - let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.payload().deserialize::().unwrap(); - }) - .res_async()) - .unwrap(); + let subscriber = + ztimeout!(sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + })) + .unwrap(); tokio::time::sleep(SLEEP).await; - ztimeout!(publisher.put(VALUE).res_async()).unwrap(); + ztimeout!(publisher.put(VALUE)).unwrap(); tokio::time::sleep(SLEEP).await; assert_ne!(*zlock!(received_value), VALUE); - ztimeout!(subscriber.undeclare().res_async()).unwrap(); + ztimeout!(subscriber.undeclare()).unwrap(); } close_sessions(sub_session, pub_session).await; close_router_session(session).await; @@ -253,28 +248,28 @@ mod test { .unwrap(); println!("Opening router session"); - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + let session = ztimeout!(zenoh::open(config_router)).unwrap(); let (sub_session, pub_session) = get_client_sessions().await; { - let publisher = 
ztimeout!(pub_session.declare_publisher(KEY_EXPR).res_async()).unwrap(); + let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); let received_value = Arc::new(Mutex::new(String::new())); let temp_recv_value = received_value.clone(); - let subscriber = ztimeout!(sub_session - .declare_subscriber(KEY_EXPR) - .callback(move |sample| { - let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.payload().deserialize::().unwrap(); - }) - .res_async()) - .unwrap(); + let subscriber = + ztimeout!(sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + })) + .unwrap(); tokio::time::sleep(SLEEP).await; - ztimeout!(publisher.put(VALUE).res_async()).unwrap(); + ztimeout!(publisher.put(VALUE)).unwrap(); tokio::time::sleep(SLEEP).await; assert_eq!(*zlock!(received_value), VALUE); - ztimeout!(subscriber.undeclare().res_async()).unwrap(); + ztimeout!(subscriber.undeclare()).unwrap(); } close_sessions(sub_session, pub_session).await; close_router_session(session).await; @@ -298,7 +293,7 @@ mod test { .unwrap(); println!("Opening router session"); - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + let session = ztimeout!(zenoh::open(config_router)).unwrap(); let (get_session, qbl_session) = get_client_sessions().await; { @@ -309,15 +304,14 @@ mod test { .callback(move |sample| { tokio::task::block_in_place(move || { Handle::current().block_on(async move { - ztimeout!(sample.reply(KEY_EXPR, VALUE).res_async()).unwrap() + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() }); }); - }) - .res_async()) + })) .unwrap(); tokio::time::sleep(SLEEP).await; - let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { match reply.result() { Ok(sample) => { @@ -329,7 +323,7 @@ mod test { } tokio::time::sleep(SLEEP).await; assert_ne!(received_value, VALUE); - ztimeout!(qbl.undeclare().res_async()).unwrap(); + ztimeout!(qbl.undeclare()).unwrap(); } close_sessions(get_session, qbl_session).await; close_router_session(session).await; @@ -353,7 +347,7 @@ mod test { .unwrap(); println!("Opening router session"); - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + let session = ztimeout!(zenoh::open(config_router)).unwrap(); let (get_session, qbl_session) = get_client_sessions().await; { @@ -364,15 +358,14 @@ mod test { .callback(move |sample| { tokio::task::block_in_place(move || { Handle::current().block_on(async move { - ztimeout!(sample.reply(KEY_EXPR, VALUE).res_async()).unwrap() + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() }); }); - }) - .res_async()) + })) .unwrap(); tokio::time::sleep(SLEEP).await; - let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { match reply.result() { Ok(sample) => { @@ -384,7 +377,7 @@ mod test { } tokio::time::sleep(SLEEP).await; assert_eq!(received_value, VALUE); - ztimeout!(qbl.undeclare().res_async()).unwrap(); + ztimeout!(qbl.undeclare()).unwrap(); } close_sessions(get_session, qbl_session).await; close_router_session(session).await; @@ -423,7 +416,7 @@ mod test { println!("Opening router session"); - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + let session = 
ztimeout!(zenoh::open(config_router)).unwrap(); let (get_session, qbl_session) = get_client_sessions().await; { @@ -434,15 +427,14 @@ mod test { .callback(move |sample| { tokio::task::block_in_place(move || { Handle::current().block_on(async move { - ztimeout!(sample.reply(KEY_EXPR, VALUE).res_async()).unwrap() + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() }); }); - }) - .res_async()) + })) .unwrap(); tokio::time::sleep(SLEEP).await; - let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { match reply.result() { Ok(sample) => { @@ -454,7 +446,7 @@ mod test { } tokio::time::sleep(SLEEP).await; assert_eq!(received_value, VALUE); - ztimeout!(qbl.undeclare().res_async()).unwrap(); + ztimeout!(qbl.undeclare()).unwrap(); } close_sessions(get_session, qbl_session).await; close_router_session(session).await; @@ -492,7 +484,7 @@ mod test { .unwrap(); println!("Opening router session"); - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + let session = ztimeout!(zenoh::open(config_router)).unwrap(); let (get_session, qbl_session) = get_client_sessions().await; { @@ -503,15 +495,14 @@ mod test { .callback(move |sample| { tokio::task::block_in_place(move || { Handle::current().block_on(async move { - ztimeout!(sample.reply(KEY_EXPR, VALUE).res_async()).unwrap() + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() }); }); - }) - .res_async()) + })) .unwrap(); tokio::time::sleep(SLEEP).await; - let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { match reply.result() { Ok(sample) => { @@ -523,7 +514,7 @@ mod test { } tokio::time::sleep(SLEEP).await; assert_ne!(received_value, VALUE); - ztimeout!(qbl.undeclare().res_async()).unwrap(); + ztimeout!(qbl.undeclare()).unwrap(); } close_sessions(get_session, qbl_session).await; close_router_session(session).await; diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index b98a656089..836845a645 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -15,8 +15,8 @@ #[test] fn attachment_pubsub() { use zenoh::bytes::ZBytes; - use zenoh::prelude::sync::*; - let zenoh = zenoh::open(Config::default()).res().unwrap(); + use zenoh::prelude::*; + let zenoh = zenoh::open(Config::default()).wait().unwrap(); let _sub = zenoh .declare_subscriber("test/attachment") .callback(|sample| { @@ -28,10 +28,10 @@ fn attachment_pubsub() { assert!(k.iter().rev().zip(v.as_slice()).all(|(k, v)| k == v)) } }) - .res() + .wait() .unwrap(); - let publisher = zenoh.declare_publisher("test/attachment").res().unwrap(); + let publisher = zenoh.declare_publisher("test/attachment").wait().unwrap(); for i in 0..10 { let mut backer = [( [0; std::mem::size_of::()], @@ -44,12 +44,12 @@ fn attachment_pubsub() { zenoh .put("test/attachment", "put") .attachment(ZBytes::from_iter(backer.iter())) - .res() + .wait() .unwrap(); publisher .put("publisher") .attachment(ZBytes::from_iter(backer.iter())) - .res() + .wait() .unwrap(); } } @@ -57,8 +57,8 @@ fn attachment_pubsub() { #[cfg(feature = "unstable")] #[test] fn attachment_queries() { - use zenoh::prelude::sync::*; - let zenoh = zenoh::open(Config::default()).res().unwrap(); + use zenoh::prelude::*; + let zenoh = zenoh::open(Config::default()).wait().unwrap(); let _sub = zenoh 
.declare_queryable("test/attachment") .callback(|query| { @@ -90,10 +90,10 @@ fn attachment_queries() { )>() .map(|(k, _)| (k, k)), )) - .res() + .wait() .unwrap(); }) - .res() + .wait() .unwrap(); for i in 0..10 { let mut backer = [( @@ -108,7 +108,7 @@ fn attachment_queries() { .get("test/attachment") .payload("query") .attachment(ZBytes::from_iter(backer.iter())) - .res() + .wait() .unwrap(); while let Ok(reply) = get.recv() { let response = reply.result().unwrap(); diff --git a/zenoh/tests/connection_retry.rs b/zenoh/tests/connection_retry.rs index d99017ff43..67a1c9c093 100644 --- a/zenoh/tests/connection_retry.rs +++ b/zenoh/tests/connection_retry.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use zenoh::prelude::sync::*; +use zenoh::prelude::*; #[test] fn retry_config_overriding() { @@ -164,7 +164,7 @@ fn listen_no_retry() { .unwrap(); config.insert_json5("listen/timeout_ms", "0").unwrap(); - zenoh::open(config).res().unwrap(); + zenoh::open(config).wait().unwrap(); } #[test] @@ -177,5 +177,5 @@ fn listen_with_retry() { config.insert_json5("listen/timeout_ms", "1000").unwrap(); - zenoh::open(config).res().unwrap(); + zenoh::open(config).wait().unwrap(); } diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index cbb38e90fc..99ca6055da 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -13,7 +13,7 @@ // use std::time::Duration; use zenoh::internal::ztimeout; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; const TIMEOUT: Duration = Duration::from_secs(10); @@ -29,25 +29,24 @@ async fn open_session(listen: &[&str], connect: &[&str]) -> Session { .collect::>(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][01a] Opening session"); - ztimeout!(zenoh::open(config).res_async()).unwrap() + ztimeout!(zenoh::open(config)).unwrap() } async fn close_session(session: Session) { println!("[ ][01d] Closing session"); - ztimeout!(session.close().res_async()).unwrap(); + ztimeout!(session.close()).unwrap(); } #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_events() { let session = open_session(&["tcp/127.0.0.1:18447"], &[]).await; let zid = session.zid(); - let sub1 = ztimeout!(session - .declare_subscriber(format!("@/session/{zid}/transport/unicast/*")) - .res()) - .unwrap(); - let sub2 = ztimeout!(session - .declare_subscriber(format!("@/session/{zid}/transport/unicast/*/link/*")) - .res()) + let sub1 = + ztimeout!(session.declare_subscriber(format!("@/session/{zid}/transport/unicast/*"))) + .unwrap(); + let sub2 = ztimeout!( + session.declare_subscriber(format!("@/session/{zid}/transport/unicast/*/link/*")) + ) .unwrap(); let session2 = open_session(&["tcp/127.0.0.1:18448"], &["tcp/127.0.0.1:18447"]).await; @@ -65,23 +64,21 @@ async fn zenoh_events() { assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/"))); assert!(sample.as_ref().unwrap().kind() == SampleKind::Put); - let replies: Vec = ztimeout!(session - .get(format!("@/session/{zid}/transport/unicast/*")) - .res_async()) - .unwrap() - .into_iter() - .collect(); + let replies: Vec = + ztimeout!(session.get(format!("@/session/{zid}/transport/unicast/*"))) + .unwrap() + .into_iter() + .collect(); assert!(replies.len() == 1); assert!(replies[0].result().is_ok()); let key_expr = replies[0].result().unwrap().key_expr().as_str(); assert!(key_expr.eq(&format!("@/session/{zid}/transport/unicast/{zid2}"))); - let replies: Vec = ztimeout!(session - .get(format!("@/session/{zid}/transport/unicast/*/link/*")) - 
.res_async()) - .unwrap() - .into_iter() - .collect(); + let replies: Vec = + ztimeout!(session.get(format!("@/session/{zid}/transport/unicast/*/link/*"))) + .unwrap() + .into_iter() + .collect(); assert!(replies.len() == 1); assert!(replies[0].result().is_ok()); let key_expr = replies[0].result().unwrap().key_expr().as_str(); @@ -101,7 +98,7 @@ async fn zenoh_events() { assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/"))); assert!(sample.as_ref().unwrap().kind() == SampleKind::Delete); - ztimeout!(sub2.undeclare().res()).unwrap(); - ztimeout!(sub1.undeclare().res()).unwrap(); + ztimeout!(sub2.undeclare()).unwrap(); + ztimeout!(sub1.undeclare()).unwrap(); close_session(session).await; } diff --git a/zenoh/tests/handler.rs b/zenoh/tests/handler.rs index b83fead54b..0862f9ee89 100644 --- a/zenoh/tests/handler.rs +++ b/zenoh/tests/handler.rs @@ -12,20 +12,20 @@ // ZettaScale Zenoh Team, // use std::{thread, time::Duration}; -use zenoh::prelude::sync::*; +use zenoh::prelude::*; #[test] fn pubsub_with_ringbuffer() { - let zenoh = zenoh::open(Config::default()).res().unwrap(); + let zenoh = zenoh::open(Config::default()).wait().unwrap(); let sub = zenoh .declare_subscriber("test/ringbuffer") .with(RingChannel::new(3)) - .res() + .wait() .unwrap(); for i in 0..10 { zenoh .put("test/ringbuffer", format!("put{i}")) - .res() + .wait() .unwrap(); } // Should only receive the last three samples ("put7", "put8", "put9") @@ -45,22 +45,22 @@ fn pubsub_with_ringbuffer() { #[test] fn query_with_ringbuffer() { - let zenoh = zenoh::open(Config::default()).res().unwrap(); + let zenoh = zenoh::open(Config::default()).wait().unwrap(); let queryable = zenoh .declare_queryable("test/ringbuffer_query") .with(RingChannel::new(1)) - .res() + .wait() .unwrap(); let _reply1 = zenoh .get("test/ringbuffer_query") .payload("query1") - .res() + .wait() .unwrap(); let _reply2 = zenoh .get("test/ringbuffer_query") .payload("query2") - .res() + .wait() .unwrap(); let query = queryable.recv().unwrap(); diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index 7a3a9c80d6..f6e876d92e 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -13,7 +13,7 @@ // use std::sync::{Arc, Mutex}; use zenoh::internal::zlock; -use zenoh::prelude::sync::*; +use zenoh::prelude::*; struct IntervalCounter { first_tick: bool, @@ -89,7 +89,7 @@ fn downsampling_by_keyexpr_impl(egress: bool) { .multicast .set_enabled(Some(false)) .unwrap(); - let zenoh_sub = zenoh::open(config_sub).res().unwrap(); + let zenoh_sub = zenoh::open(config_sub).wait().unwrap(); let counter_r100 = Arc::new(Mutex::new(IntervalCounter::new())); let counter_r100_clone = counter_r100.clone(); @@ -110,7 +110,7 @@ fn downsampling_by_keyexpr_impl(egress: bool) { zlock!(counter_r50).tick(); } }) - .res() + .wait() .unwrap(); // declare publisher @@ -126,29 +126,29 @@ fn downsampling_by_keyexpr_impl(egress: bool) { .multicast .set_enabled(Some(false)) .unwrap(); - let zenoh_pub = zenoh::open(config_pub).res().unwrap(); + let zenoh_pub = zenoh::open(config_pub).wait().unwrap(); let publisher_r100 = zenoh_pub .declare_publisher("test/downsamples_by_keyexp/r100") - .res() + .wait() .unwrap(); let publisher_r50 = zenoh_pub .declare_publisher("test/downsamples_by_keyexp/r50") - .res() + .wait() .unwrap(); let publisher_all = zenoh_pub .declare_publisher("test/downsamples_by_keyexp/all") - .res() + .wait() .unwrap(); // WARN(yuyuan): 2 ms is the limit of tokio let interval = std::time::Duration::from_millis(2); 
let messages_count = 1000; for i in 0..messages_count { - publisher_r100.put(format!("message {}", i)).res().unwrap(); - publisher_r50.put(format!("message {}", i)).res().unwrap(); - publisher_all.put(format!("message {}", i)).res().unwrap(); + publisher_r100.put(format!("message {}", i)).wait().unwrap(); + publisher_r50.put(format!("message {}", i)).wait().unwrap(); + publisher_all.put(format!("message {}", i)).wait().unwrap(); std::thread::sleep(interval); } @@ -205,7 +205,7 @@ fn downsampling_by_interface_impl(egress: bool) { if !egress { config_sub.insert_json5("downsampling", &ds_cfg).unwrap(); }; - let zenoh_sub = zenoh::open(config_sub).res().unwrap(); + let zenoh_sub = zenoh::open(config_sub).wait().unwrap(); let counter_r100 = Arc::new(Mutex::new(IntervalCounter::new())); let counter_r100_clone = counter_r100.clone(); @@ -222,7 +222,7 @@ fn downsampling_by_interface_impl(egress: bool) { zlock!(counter_r100).tick(); } }) - .res() + .wait() .unwrap(); // declare publisher @@ -233,23 +233,23 @@ fn downsampling_by_interface_impl(egress: bool) { if egress { config_pub.insert_json5("downsampling", &ds_cfg).unwrap(); } - let zenoh_pub = zenoh::open(config_pub).res().unwrap(); + let zenoh_pub = zenoh::open(config_pub).wait().unwrap(); let publisher_r100 = zenoh_pub .declare_publisher("test/downsamples_by_interface/r100") - .res() + .wait() .unwrap(); let publisher_all = zenoh_pub .declare_publisher("test/downsamples_by_interface/all") - .res() + .wait() .unwrap(); // WARN(yuyuan): 2 ms is the limit of tokio let interval = std::time::Duration::from_millis(2); let messages_count = 1000; for i in 0..messages_count { - publisher_r100.put(format!("message {}", i)).res().unwrap(); - publisher_all.put(format!("message {}", i)).res().unwrap(); + publisher_r100.put(format!("message {}", i)).wait().unwrap(); + publisher_all.put(format!("message {}", i)).wait().unwrap(); std::thread::sleep(interval); } @@ -295,5 +295,5 @@ fn downsampling_config_error_wrong_strategy() { ) .unwrap(); - zenoh::open(config).res().unwrap(); + zenoh::open(config).wait().unwrap(); } diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index 1cd0830ea2..0456361419 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -23,33 +23,24 @@ async fn zenoh_liveliness() { .set_endpoints(vec!["tcp/localhost:47447".parse().unwrap()]) .unwrap(); c1.scouting.multicast.set_enabled(Some(false)).unwrap(); - let session1 = ztimeout!(zenoh::open(c1).res_async()).unwrap(); + let session1 = ztimeout!(zenoh::open(c1)).unwrap(); let mut c2 = config::peer(); c2.connect .set_endpoints(vec!["tcp/localhost:47447".parse().unwrap()]) .unwrap(); c2.scouting.multicast.set_enabled(Some(false)).unwrap(); - let session2 = ztimeout!(zenoh::open(c2).res_async()).unwrap(); + let session2 = ztimeout!(zenoh::open(c2)).unwrap(); let sub = ztimeout!(session2 .liveliness() - .declare_subscriber("zenoh_liveliness_test") - .res_async()) + .declare_subscriber("zenoh_liveliness_test")) .unwrap(); - let token = ztimeout!(session1 - .liveliness() - .declare_token("zenoh_liveliness_test") - .res_async()) - .unwrap(); + let token = ztimeout!(session1.liveliness().declare_token("zenoh_liveliness_test")).unwrap(); tokio::time::sleep(SLEEP).await; - let replies = ztimeout!(session2 - .liveliness() - .get("zenoh_liveliness_test") - .res_async()) - .unwrap(); + let replies = ztimeout!(session2.liveliness().get("zenoh_liveliness_test")).unwrap(); let sample: Sample = ztimeout!(replies.recv_async()) .unwrap() .into_result() @@ -67,11 +58,7 @@ 
async fn zenoh_liveliness() { tokio::time::sleep(SLEEP).await; - let replies = ztimeout!(session2 - .liveliness() - .get("zenoh_liveliness_test") - .res_async()) - .unwrap(); + let replies = ztimeout!(session2.liveliness().get("zenoh_liveliness_test")).unwrap(); assert!(ztimeout!(replies.recv_async()).is_err()); assert!(replies.try_recv().is_err()); diff --git a/zenoh/tests/matching.rs b/zenoh/tests/matching.rs index 4e838f98a1..1473d7f6fc 100644 --- a/zenoh/tests/matching.rs +++ b/zenoh/tests/matching.rs @@ -35,8 +35,8 @@ async fn create_session_pair(locator: &str) -> (Session, Session) { }; let config2 = zenoh::config::client([Locator::from_str(locator).unwrap()]); - let session1 = ztimeout!(zenoh::open(config1).res_async()).unwrap(); - let session2 = ztimeout!(zenoh::open(config2).res_async()).unwrap(); + let session1 = ztimeout!(zenoh::open(config1)).unwrap(); + let session2 = ztimeout!(zenoh::open(config2)).unwrap(); (session1, session2) } @@ -47,54 +47,47 @@ async fn zenoh_matching_status_any() -> ZResult<()> { let publisher1 = ztimeout!(session1 .declare_publisher("zenoh_matching_status_any_test") - .allowed_destination(Locality::Any) - .res_async()) + .allowed_destination(Locality::Any)) .unwrap(); - let matching_listener = ztimeout!(publisher1.matching_listener().res_async()).unwrap(); + let matching_listener = ztimeout!(publisher1.matching_listener()).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); - let sub = ztimeout!(session1 - .declare_subscriber("zenoh_matching_status_any_test") - .res_async()) - .unwrap(); + let sub = ztimeout!(session1.declare_subscriber("zenoh_matching_status_any_test")).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(true)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(matching_status.matching_subscribers()); - ztimeout!(sub.undeclare().res_async()).unwrap(); + ztimeout!(sub.undeclare()).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(false)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); - let sub = ztimeout!(session2 - .declare_subscriber("zenoh_matching_status_any_test") - .res_async()) - .unwrap(); + let sub = ztimeout!(session2.declare_subscriber("zenoh_matching_status_any_test")).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(true)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(matching_status.matching_subscribers()); - ztimeout!(sub.undeclare().res_async()).unwrap(); + ztimeout!(sub.undeclare()).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.ok().map(|s| s.matching_subscribers()) == 
Some(false)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); Ok(()) } @@ -102,60 +95,53 @@ async fn zenoh_matching_status_any() -> ZResult<()> { #[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_matching_status_remote() -> ZResult<()> { - let session1 = ztimeout!(zenoh::open(peer()).res_async()).unwrap(); + let session1 = ztimeout!(zenoh::open(peer())).unwrap(); - let session2 = ztimeout!(zenoh::open(peer()).res_async()).unwrap(); + let session2 = ztimeout!(zenoh::open(peer())).unwrap(); let publisher1 = ztimeout!(session1 .declare_publisher("zenoh_matching_status_remote_test") - .allowed_destination(Locality::Remote) - .res_async()) + .allowed_destination(Locality::Remote)) .unwrap(); - let matching_listener = ztimeout!(publisher1.matching_listener().res_async()).unwrap(); + let matching_listener = ztimeout!(publisher1.matching_listener()).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); - let sub = ztimeout!(session1 - .declare_subscriber("zenoh_matching_status_remote_test") - .res_async()) - .unwrap(); + let sub = ztimeout!(session1.declare_subscriber("zenoh_matching_status_remote_test")).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); - ztimeout!(sub.undeclare().res_async()).unwrap(); + ztimeout!(sub.undeclare()).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); - let sub = ztimeout!(session2 - .declare_subscriber("zenoh_matching_status_remote_test") - .res_async()) - .unwrap(); + let sub = ztimeout!(session2.declare_subscriber("zenoh_matching_status_remote_test")).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(true)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(matching_status.matching_subscribers()); - ztimeout!(sub.undeclare().res_async()).unwrap(); + ztimeout!(sub.undeclare()).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(false)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); Ok(()) @@ -164,60 +150,53 @@ async fn zenoh_matching_status_remote() -> ZResult<()> { #[cfg(feature = 
"unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_matching_status_local() -> ZResult<()> { - let session1 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); + let session1 = ztimeout!(zenoh::open(config::peer())).unwrap(); - let session2 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); + let session2 = ztimeout!(zenoh::open(config::peer())).unwrap(); let publisher1 = ztimeout!(session1 .declare_publisher("zenoh_matching_status_local_test") - .allowed_destination(Locality::SessionLocal) - .res_async()) + .allowed_destination(Locality::SessionLocal)) .unwrap(); - let matching_listener = ztimeout!(publisher1.matching_listener().res_async()).unwrap(); + let matching_listener = ztimeout!(publisher1.matching_listener()).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); - let sub = ztimeout!(session1 - .declare_subscriber("zenoh_matching_status_local_test") - .res_async()) - .unwrap(); + let sub = ztimeout!(session1.declare_subscriber("zenoh_matching_status_local_test")).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(true)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(matching_status.matching_subscribers()); - ztimeout!(sub.undeclare().res_async()).unwrap(); + ztimeout!(sub.undeclare()).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(false)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); - let sub = ztimeout!(session2 - .declare_subscriber("zenoh_matching_status_local_test") - .res_async()) - .unwrap(); + let sub = ztimeout!(session2.declare_subscriber("zenoh_matching_status_local_test")).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); - ztimeout!(sub.undeclare().res_async()).unwrap(); + ztimeout!(sub.undeclare()).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); Ok(()) diff --git a/zenoh/tests/payload.rs b/zenoh/tests/payload.rs index fac5d37367..fecf10a608 100644 --- a/zenoh/tests/payload.rs +++ b/zenoh/tests/payload.rs @@ -15,7 +15,7 @@ #[test] #[cfg(all(feature = "shared-memory", feature = "unstable"))] fn shm_payload_single_buf() { - use zenoh::prelude::r#async::*; + use zenoh::prelude::*; // create an SHM backend... 
let backend = PosixSharedMemoryProviderBackend::builder() diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index b70d01ec79..6f44b2d0be 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -13,40 +13,38 @@ // use std::time::Duration; use zenoh::internal::ztimeout; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn pubsub() { - let session1 = ztimeout!(zenoh::open(zenoh_config::peer()).res_async()).unwrap(); - let session2 = ztimeout!(zenoh::open(zenoh_config::peer()).res_async()).unwrap(); + let session1 = ztimeout!(zenoh::open(zenoh_config::peer())).unwrap(); + let session2 = ztimeout!(zenoh::open(zenoh_config::peer())).unwrap(); let publisher1 = ztimeout!(session1 .declare_publisher("test/qos") .priority(Priority::DataHigh) - .congestion_control(CongestionControl::Drop) - .res()) + .congestion_control(CongestionControl::Drop)) .unwrap(); let publisher2 = ztimeout!(session1 .declare_publisher("test/qos") .priority(Priority::DataLow) - .congestion_control(CongestionControl::Block) - .res()) + .congestion_control(CongestionControl::Block)) .unwrap(); - let subscriber = ztimeout!(session2.declare_subscriber("test/qos").res()).unwrap(); + let subscriber = ztimeout!(session2.declare_subscriber("test/qos")).unwrap(); tokio::time::sleep(SLEEP).await; - ztimeout!(publisher1.put("qos").res_async()).unwrap(); + ztimeout!(publisher1.put("qos")).unwrap(); let sample = ztimeout!(subscriber.recv_async()).unwrap(); assert_eq!(sample.priority(), Priority::DataHigh); assert_eq!(sample.congestion_control(), CongestionControl::Drop); - ztimeout!(publisher2.put("qos").res_async()).unwrap(); + ztimeout!(publisher2.put("qos")).unwrap(); let sample = ztimeout!(subscriber.recv_async()).unwrap(); assert_eq!(sample.priority(), Priority::DataLow); diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index dd6e7fd715..3c9f2723a6 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -18,7 +18,7 @@ use std::time::Duration; use tokio_util::{sync::CancellationToken, task::TaskTracker}; use zenoh::core::Result; use zenoh::internal::{bail, ztimeout}; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; const TIMEOUT: Duration = Duration::from_secs(10); const MSG_COUNT: usize = 50; @@ -47,7 +47,7 @@ impl Task { match self { // The Sub task checks if the incoming message matches the expected size until it receives enough counts. Self::Sub(ke, expected_size) => { - let sub = ztimeout!(session.declare_subscriber(ke).res_async())?; + let sub = ztimeout!(session.declare_subscriber(ke))?; let mut counter = 0; loop { tokio::select! { @@ -77,10 +77,11 @@ impl Task { _ = token.cancelled() => break, // WARN: this won't yield after a timeout since the put is a blocking call - res = tokio::time::timeout(std::time::Duration::from_secs(1), session + res = tokio::time::timeout(std::time::Duration::from_secs(1), async {session .put(ke, vec![0u8; *payload_size]) .congestion_control(CongestionControl::Block) - .res()) => { + .await + }) => { let _ = res?; } } @@ -94,7 +95,7 @@ impl Task { while counter < MSG_COUNT { tokio::select! 
{ _ = token.cancelled() => break, - replies = session.get(ke).timeout(Duration::from_secs(10)).res() => { + replies = async { session.get(ke).timeout(Duration::from_secs(10)).await } => { let replies = replies?; while let Ok(reply) = replies.recv_async().await { match reply.result() { @@ -124,14 +125,14 @@ impl Task { // The Queryable task keeps replying to requested messages until all checkpoints are finished. Self::Queryable(ke, payload_size) => { - let queryable = ztimeout!(session.declare_queryable(ke).res_async())?; + let queryable = ztimeout!(session.declare_queryable(ke))?; let payload = vec![0u8; *payload_size]; loop { tokio::select! { _ = token.cancelled() => break, query = queryable.recv_async() => { - ztimeout!(query?.reply(ke.to_owned(), payload.clone()).res_async())?; + ztimeout!(query?.reply(ke.to_owned(), payload.clone()))?; }, } } @@ -276,7 +277,7 @@ impl Recipe { // In case of client can't connect to some peers/routers loop { - if let Ok(session) = ztimeout!(zenoh::open(config.clone()).res_async()) { + if let Ok(session) = ztimeout!(zenoh::open(config.clone())) { break session.into_arc(); } else { tokio::time::sleep(Duration::from_secs(1)).await; @@ -312,7 +313,7 @@ impl Recipe { // node_task_tracker.wait().await; // Close the session once all the task assoicated with the node are done. - ztimeout!(Arc::try_unwrap(session).unwrap().close().res_async())?; + ztimeout!(Arc::try_unwrap(session).unwrap().close())?; println!("Node: {} is closed.", &node.name); Result::Ok(()) diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 91d9b6d95b..b52dbb90b8 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -15,7 +15,8 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; use zenoh::internal::ztimeout; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; +#[cfg(feature = "unstable")] use zenoh::runtime::Runtime; const TIMEOUT: Duration = Duration::from_secs(60); @@ -33,7 +34,7 @@ async fn open_session_unicast(endpoints: &[&str]) -> (Session, Session) { .collect::>(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][01a] Opening peer01 session: {:?}", endpoints); - let peer01 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let peer01 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); config.connect.endpoints = endpoints @@ -42,7 +43,7 @@ async fn open_session_unicast(endpoints: &[&str]) -> (Session, Session) { .collect::>(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][02a] Opening peer02 session: {:?}", endpoints); - let peer02 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let peer02 = ztimeout!(zenoh::open(config)).unwrap(); (peer01, peer02) } @@ -53,22 +54,22 @@ async fn open_session_multicast(endpoint01: &str, endpoint02: &str) -> (Session, config.listen.endpoints = vec![endpoint01.parse().unwrap()]; config.scouting.multicast.set_enabled(Some(true)).unwrap(); println!("[ ][01a] Opening peer01 session: {}", endpoint01); - let peer01 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let peer01 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); config.listen.endpoints = vec![endpoint02.parse().unwrap()]; config.scouting.multicast.set_enabled(Some(true)).unwrap(); println!("[ ][02a] Opening peer02 session: {}", endpoint02); - let peer02 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let peer02 = ztimeout!(zenoh::open(config)).unwrap(); (peer01, peer02) } async fn 
close_session(peer01: Session, peer02: Session) { println!("[ ][01d] Closing peer01 session"); - ztimeout!(peer01.close().res_async()).unwrap(); + ztimeout!(peer01.close()).unwrap(); println!("[ ][02d] Closing peer02 session"); - ztimeout!(peer02.close().res_async()).unwrap(); + ztimeout!(peer02.close()).unwrap(); } async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Reliability) { @@ -85,13 +86,10 @@ async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Re // Subscribe to data println!("[PS][01b] Subscribing on peer01 session"); let c_msgs = msgs.clone(); - let sub = ztimeout!(peer01 - .declare_subscriber(key_expr) - .callback(move |sample| { - assert_eq!(sample.payload().len(), size); - c_msgs.fetch_add(1, Ordering::Relaxed); - }) - .res_async()) + let sub = ztimeout!(peer01.declare_subscriber(key_expr).callback(move |sample| { + assert_eq!(sample.payload().len(), size); + c_msgs.fetch_add(1, Ordering::Relaxed); + })) .unwrap(); // Wait for the declaration to propagate @@ -102,8 +100,7 @@ async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Re for _ in 0..msg_count { ztimeout!(peer02 .put(key_expr, vec![0u8; size]) - .congestion_control(CongestionControl::Block) - .res_async()) + .congestion_control(CongestionControl::Block)) .unwrap(); } @@ -123,7 +120,7 @@ async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Re tokio::time::sleep(SLEEP).await; println!("[PS][03b] Unsubscribing on peer01 session"); - ztimeout!(sub.undeclare().res_async()).unwrap(); + ztimeout!(sub.undeclare()).unwrap(); // Wait for the declaration to propagate tokio::time::sleep(SLEEP).await; @@ -144,43 +141,36 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re // Queryable to data println!("[QR][01c] Queryable on peer01 session"); let c_msgs = msgs.clone(); - let qbl = ztimeout!(peer01 - .declare_queryable(key_expr) - .callback(move |query| { - c_msgs.fetch_add(1, Ordering::Relaxed); - match query.parameters().as_str() { - "ok_put" => { - tokio::task::block_in_place(|| { - tokio::runtime::Handle::current().block_on(async { - ztimeout!(query - .reply( - KeyExpr::try_from(key_expr).unwrap(), - vec![0u8; size].to_vec() - ) - .res_async()) - .unwrap() - }) - }); - } - "ok_del" => { - tokio::task::block_in_place(|| { - tokio::runtime::Handle::current().block_on(async { - ztimeout!(query.reply_del(key_expr).res_async()).unwrap() - }) - }); - } - "err" => { - let rep = Value::from(vec![0u8; size]); - tokio::task::block_in_place(|| { - tokio::runtime::Handle::current().block_on(async { - ztimeout!(query.reply_err(rep).res_async()).unwrap() - }) - }); - } - _ => panic!("Unknown query parameter"), + let qbl = ztimeout!(peer01.declare_queryable(key_expr).callback(move |query| { + c_msgs.fetch_add(1, Ordering::Relaxed); + match query.parameters().as_str() { + "ok_put" => { + tokio::task::block_in_place(|| { + tokio::runtime::Handle::current().block_on(async { + ztimeout!(query.reply( + KeyExpr::try_from(key_expr).unwrap(), + vec![0u8; size].to_vec() + )) + .unwrap() + }) + }); } - }) - .res_async()) + "ok_del" => { + tokio::task::block_in_place(|| { + tokio::runtime::Handle::current() + .block_on(async { ztimeout!(query.reply_del(key_expr)).unwrap() }) + }); + } + "err" => { + let rep = Value::from(vec![0u8; size]); + tokio::task::block_in_place(|| { + tokio::runtime::Handle::current() + .block_on(async { ztimeout!(query.reply_err(rep)).unwrap() }) + }); + } + _ => panic!("Unknown query parameter"), 
+ } + })) .unwrap(); // Wait for the declaration to propagate @@ -191,7 +181,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re let mut cnt = 0; for _ in 0..msg_count { let selector = format!("{}?ok_put", key_expr); - let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); + let rs = ztimeout!(peer02.get(selector)).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { let s = s.result().unwrap(); assert_eq!(s.kind(), SampleKind::Put); @@ -209,7 +199,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re let mut cnt = 0; for _ in 0..msg_count { let selector = format!("{}?ok_del", key_expr); - let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); + let rs = ztimeout!(peer02.get(selector)).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { let s = s.result().unwrap(); assert_eq!(s.kind(), SampleKind::Delete); @@ -227,7 +217,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re let mut cnt = 0; for _ in 0..msg_count { let selector = format!("{}?err", key_expr); - let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); + let rs = ztimeout!(peer02.get(selector)).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { let e = s.result().unwrap_err(); assert_eq!(e.payload().len(), size); @@ -239,7 +229,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re assert_eq!(cnt, msg_count); println!("[PS][03c] Unqueryable on peer01 session"); - ztimeout!(qbl.undeclare().res_async()).unwrap(); + ztimeout!(qbl.undeclare()).unwrap(); // Wait for the declaration to propagate tokio::time::sleep(SLEEP).await; @@ -264,6 +254,7 @@ async fn zenoh_session_multicast() { close_session(peer01, peer02).await; } +#[cfg(feature = "unstable")] async fn open_session_unicast_runtime(endpoints: &[&str]) -> (Runtime, Runtime) { // Open the sessions let mut config = config::peer(); @@ -287,15 +278,16 @@ async fn open_session_unicast_runtime(endpoints: &[&str]) -> (Runtime, Runtime) (r1, r2) } +#[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_2sessions_1runtime_init() { let (r1, r2) = open_session_unicast_runtime(&["tcp/127.0.0.1:17449"]).await; println!("[RI][02a] Creating peer01 session from runtime 1"); - let peer01 = zenoh::session::init(r1.clone()).res_async().await.unwrap(); + let peer01 = zenoh::session::init(r1.clone()).await.unwrap(); println!("[RI][02b] Creating peer02 session from runtime 2"); - let peer02 = zenoh::session::init(r2.clone()).res_async().await.unwrap(); + let peer02 = zenoh::session::init(r2.clone()).await.unwrap(); println!("[RI][02c] Creating peer01a session from runtime 1"); - let peer01a = zenoh::session::init(r1.clone()).res_async().await.unwrap(); + let peer01a = zenoh::session::init(r1.clone()).await.unwrap(); println!("[RI][03c] Closing peer01a session"); std::mem::drop(peer01a); test_session_pubsub(&peer01, &peer02, Reliability::Reliable).await; diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs index a7bc481e27..ec77890c1e 100644 --- a/zenoh/tests/shm.rs +++ b/zenoh/tests/shm.rs @@ -17,7 +17,7 @@ mod tests { use std::sync::Arc; use std::time::Duration; use zenoh::internal::ztimeout; - use zenoh::prelude::r#async::*; + use zenoh::prelude::*; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); @@ -35,7 +35,7 @@ mod tests { config.scouting.multicast.set_enabled(Some(false)).unwrap(); 
config.transport.shared_memory.set_enabled(true).unwrap(); println!("[ ][01a] Opening peer01 session: {:?}", endpoints); - let peer01 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let peer01 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); config.connect.endpoints = endpoints @@ -45,7 +45,7 @@ mod tests { config.scouting.multicast.set_enabled(Some(false)).unwrap(); config.transport.shared_memory.set_enabled(true).unwrap(); println!("[ ][02a] Opening peer02 session: {:?}", endpoints); - let peer02 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let peer02 = ztimeout!(zenoh::open(config)).unwrap(); (peer01, peer02) } @@ -57,23 +57,23 @@ mod tests { config.scouting.multicast.set_enabled(Some(true)).unwrap(); config.transport.shared_memory.set_enabled(true).unwrap(); println!("[ ][01a] Opening peer01 session: {}", endpoint01); - let peer01 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let peer01 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); config.listen.endpoints = vec![endpoint02.parse().unwrap()]; config.scouting.multicast.set_enabled(Some(true)).unwrap(); config.transport.shared_memory.set_enabled(true).unwrap(); println!("[ ][02a] Opening peer02 session: {}", endpoint02); - let peer02 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let peer02 = ztimeout!(zenoh::open(config)).unwrap(); (peer01, peer02) } async fn close_session(peer01: Session, peer02: Session) { println!("[ ][01d] Closing peer02 session"); - ztimeout!(peer01.close().res_async()).unwrap(); + ztimeout!(peer01.close()).unwrap(); println!("[ ][02d] Closing peer02 session"); - ztimeout!(peer02.close().res_async()).unwrap(); + ztimeout!(peer02.close()).unwrap(); } async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Reliability) { @@ -96,8 +96,7 @@ mod tests { .callback(move |sample| { assert_eq!(sample.payload().len(), size); c_msgs.fetch_add(1, Ordering::Relaxed); - }) - .res_async()) + })) .unwrap(); // Wait for the declaration to propagate @@ -135,8 +134,7 @@ mod tests { // Publish this message ztimeout!(peer02 .put(&key_expr, sbuf) - .congestion_control(CongestionControl::Block) - .res_async()) + .congestion_control(CongestionControl::Block)) .unwrap(); println!("{c} putted"); } diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index fcddcf3b3e..c5be555a00 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -16,7 +16,7 @@ use std::sync::Arc; use std::time::Duration; use tokio::runtime::Handle; use zenoh::internal::ztimeout; -use zenoh::prelude::r#async::*; +use zenoh::prelude::*; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); @@ -29,14 +29,14 @@ async fn open_p2p_sessions() -> (Session, Session, Session) { config.listen.endpoints = vec!["tcp/127.0.0.1:27447".parse().unwrap()]; config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][01a] Opening s01 session"); - let s01 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let s01 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); config.listen.endpoints = vec!["tcp/127.0.0.1:27448".parse().unwrap()]; config.connect.endpoints = vec!["tcp/127.0.0.1:27447".parse().unwrap()]; config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][02a] Opening s02 session"); - let s02 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let s02 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); 
config.connect.endpoints = vec![ @@ -45,7 +45,7 @@ async fn open_p2p_sessions() -> (Session, Session, Session) { ]; config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][03a] Opening s03 session"); - let s03 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let s03 = ztimeout!(zenoh::open(config)).unwrap(); (s01, s02, s03) } @@ -57,38 +57,38 @@ async fn open_router_session() -> Session { config.listen.endpoints = vec!["tcp/127.0.0.1:37447".parse().unwrap()]; config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][00a] Opening router session"); - ztimeout!(zenoh::open(config).res_async()).unwrap() + ztimeout!(zenoh::open(config)).unwrap() } async fn close_router_session(s: Session) { println!("[ ][01d] Closing router session"); - ztimeout!(s.close().res_async()).unwrap(); + ztimeout!(s.close()).unwrap(); } async fn open_client_sessions() -> (Session, Session, Session) { // Open the sessions let config = config::client(["tcp/127.0.0.1:37447".parse::().unwrap()]); println!("[ ][01a] Opening s01 session"); - let s01 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let s01 = ztimeout!(zenoh::open(config)).unwrap(); let config = config::client(["tcp/127.0.0.1:37447".parse::().unwrap()]); println!("[ ][02a] Opening s02 session"); - let s02 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let s02 = ztimeout!(zenoh::open(config)).unwrap(); let config = config::client(["tcp/127.0.0.1:37447".parse::().unwrap()]); println!("[ ][03a] Opening s03 session"); - let s03 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let s03 = ztimeout!(zenoh::open(config)).unwrap(); (s01, s02, s03) } async fn close_sessions(s01: Session, s02: Session, s03: Session) { println!("[ ][01d] Closing s01 session"); - ztimeout!(s01.close().res_async()).unwrap(); + ztimeout!(s01.close()).unwrap(); println!("[ ][02d] Closing s02 session"); - ztimeout!(s02.close().res_async()).unwrap(); + ztimeout!(s02.close()).unwrap(); println!("[ ][03d] Closing s03 session"); - ztimeout!(s03.close().res_async()).unwrap(); + ztimeout!(s03.close()).unwrap(); } async fn test_unicity_pubsub(s01: &Session, s02: &Session, s03: &Session) { @@ -104,25 +104,19 @@ async fn test_unicity_pubsub(s01: &Session, s02: &Session, s03: &Session) { // Subscribe to data println!("[PS][01b] Subscribing on s01 session"); let c_msgs1 = msgs1.clone(); - let sub1 = ztimeout!(s01 - .declare_subscriber(key_expr) - .callback(move |sample| { - assert_eq!(sample.payload().len(), size); - c_msgs1.fetch_add(1, Ordering::Relaxed); - }) - .res_async()) + let sub1 = ztimeout!(s01.declare_subscriber(key_expr).callback(move |sample| { + assert_eq!(sample.payload().len(), size); + c_msgs1.fetch_add(1, Ordering::Relaxed); + })) .unwrap(); // Subscribe to data println!("[PS][02b] Subscribing on s02 session"); let c_msgs2 = msgs2.clone(); - let sub2 = ztimeout!(s02 - .declare_subscriber(key_expr) - .callback(move |sample| { - assert_eq!(sample.payload().len(), size); - c_msgs2.fetch_add(1, Ordering::Relaxed); - }) - .res_async()) + let sub2 = ztimeout!(s02.declare_subscriber(key_expr).callback(move |sample| { + assert_eq!(sample.payload().len(), size); + c_msgs2.fetch_add(1, Ordering::Relaxed); + })) .unwrap(); // Wait for the declaration to propagate @@ -133,8 +127,7 @@ async fn test_unicity_pubsub(s01: &Session, s02: &Session, s03: &Session) { for _ in 0..msg_count { ztimeout!(s03 .put(key_expr, vec![0u8; size]) - .congestion_control(CongestionControl::Block) - .res_async()) + 
.congestion_control(CongestionControl::Block)) .unwrap(); } @@ -162,10 +155,10 @@ async fn test_unicity_pubsub(s01: &Session, s02: &Session, s03: &Session) { assert_eq!(cnt2, msg_count); println!("[PS][02b] Unsubscribing on s02 session"); - ztimeout!(sub2.undeclare().res_async()).unwrap(); + ztimeout!(sub2.undeclare()).unwrap(); println!("[PS][01b] Unsubscribing on s01 session"); - ztimeout!(sub1.undeclare().res_async()).unwrap(); + ztimeout!(sub1.undeclare()).unwrap(); // Wait for the declaration to propagate tokio::time::sleep(SLEEP).await; @@ -186,42 +179,34 @@ async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) { println!("[QR][01c] Queryable on s01 session"); let cke = key_expr.clone(); let c_msgs1 = msgs1.clone(); - let qbl1 = ztimeout!(s01 - .declare_queryable(cke.clone()) - .callback(move |sample| { - c_msgs1.fetch_add(1, Ordering::Relaxed); - tokio::task::block_in_place({ - let cke2 = cke.clone(); - move || { - Handle::current().block_on(async move { - ztimeout!(sample.reply(cke2.clone(), vec![0u8; size]).res_async()) - .unwrap() - }); - } - }); - }) - .res_async()) + let qbl1 = ztimeout!(s01.declare_queryable(cke.clone()).callback(move |sample| { + c_msgs1.fetch_add(1, Ordering::Relaxed); + tokio::task::block_in_place({ + let cke2 = cke.clone(); + move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(cke2.clone(), vec![0u8; size])).unwrap() + }); + } + }); + })) .unwrap(); // Queryable to data println!("[QR][02c] Queryable on s02 session"); let cke = key_expr.clone(); let c_msgs2 = msgs2.clone(); - let qbl2 = ztimeout!(s02 - .declare_queryable(cke.clone()) - .callback(move |sample| { - c_msgs2.fetch_add(1, Ordering::Relaxed); - tokio::task::block_in_place({ - let cke2 = cke.clone(); - move || { - Handle::current().block_on(async move { - ztimeout!(sample.reply(cke2.clone(), vec![0u8; size]).res_async()) - .unwrap() - }); - } - }); - }) - .res_async()) + let qbl2 = ztimeout!(s02.declare_queryable(cke.clone()).callback(move |sample| { + c_msgs2.fetch_add(1, Ordering::Relaxed); + tokio::task::block_in_place({ + let cke2 = cke.clone(); + move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(cke2.clone(), vec![0u8; size])).unwrap() + }); + } + }); + })) .unwrap(); // Wait for the declaration to propagate @@ -232,7 +217,7 @@ async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) { let cke = key_expr.clone(); let mut cnt = 0; for _ in 0..msg_count { - let rs = ztimeout!(s03.get(cke.clone()).res_async()).unwrap(); + let rs = ztimeout!(s03.get(cke.clone())).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { assert_eq!(s.result().unwrap().payload().len(), size); cnt += 1; @@ -248,10 +233,10 @@ async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) { assert_eq!(cnt, msg_count); println!("[PS][01c] Unqueryable on s01 session"); - ztimeout!(qbl1.undeclare().res_async()).unwrap(); + ztimeout!(qbl1.undeclare()).unwrap(); println!("[PS][02c] Unqueryable on s02 session"); - ztimeout!(qbl2.undeclare().res_async()).unwrap(); + ztimeout!(qbl2.undeclare()).unwrap(); // Wait for the declaration to propagate tokio::time::sleep(SLEEP).await; diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index d8fed7eeb4..cabee33333 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -19,7 +19,6 @@ use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::EnvFilter; use zenoh::config::EndPoint; use zenoh::config::{Config, ModeDependentValue, PermissionsConf, ValidatedMap}; -use 
zenoh::core::AsyncResolve; use zenoh::core::Result; use zenoh::scouting::WhatAmI; @@ -107,7 +106,7 @@ fn main() { let config = config_from_args(&args); tracing::info!("Initial conf: {}", &config); - let _session = match zenoh::open(config).res_async().await { + let _session = match zenoh::open(config).await { Ok(runtime) => runtime, Err(e) => { println!("{e}. Exiting..."); From b1b1e91011072402744836ce0d66160d710bbc8b Mon Sep 17 00:00:00 2001 From: Michael Ilyin Date: Mon, 29 Apr 2024 16:41:03 +0200 Subject: [PATCH 308/357] missing API items reexported (#989) --- zenoh/src/lib.rs | 52 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 6f679407c8..c4247b73da 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -181,6 +181,7 @@ pub mod key_expr { pub use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut}; } pub use crate::api::key_expr::KeyExpr; + pub use crate::api::key_expr::KeyExprUndeclaration; pub use zenoh_keyexpr::keyexpr; pub use zenoh_keyexpr::OwnedKeyExpr; pub use zenoh_keyexpr::SetIntersectionLevel; @@ -202,24 +203,42 @@ pub mod session { #[doc(hidden)] pub use crate::api::session::init; pub use crate::api::session::open; + #[zenoh_macros::unstable] + #[doc(hidden)] + pub use crate::api::session::InitBuilder; + pub use crate::api::session::OpenBuilder; pub use crate::api::session::Session; pub use crate::api::session::SessionDeclarations; pub use crate::api::session::SessionRef; + pub use crate::api::session::Undeclarable; +} + +/// Tools to access information about the current zenoh [`Session`](crate::Session). +pub mod info { + pub use crate::api::info::PeersZidBuilder; + pub use crate::api::info::RoutersZidBuilder; + pub use crate::api::info::SessionInfo; + pub use crate::api::info::ZidBuilder; } /// Sample primitives pub mod sample { pub use crate::api::builders::sample::QoSBuilderTrait; pub use crate::api::builders::sample::SampleBuilder; + pub use crate::api::builders::sample::SampleBuilderAny; + pub use crate::api::builders::sample::SampleBuilderDelete; + pub use crate::api::builders::sample::SampleBuilderPut; pub use crate::api::builders::sample::SampleBuilderTrait; pub use crate::api::builders::sample::TimestampBuilderTrait; pub use crate::api::builders::sample::ValueBuilderTrait; #[zenoh_macros::unstable] pub use crate::api::sample::Locality; pub use crate::api::sample::Sample; + pub use crate::api::sample::SampleFields; pub use crate::api::sample::SampleKind; #[zenoh_macros::unstable] pub use crate::api::sample::SourceInfo; + pub use crate::api::sample::SourceSn; } /// Value primitives @@ -235,10 +254,14 @@ pub mod encoding { /// Payload primitives pub mod bytes { pub use crate::api::bytes::Deserialize; + pub use crate::api::bytes::OptionZBytes; pub use crate::api::bytes::Serialize; pub use crate::api::bytes::StringOrBase64; pub use crate::api::bytes::ZBytes; + pub use crate::api::bytes::ZBytesIterator; pub use crate::api::bytes::ZBytesReader; + pub use crate::api::bytes::ZBytesWriter; + pub use crate::api::bytes::ZDeserializeError; pub use crate::api::bytes::ZSerde; } @@ -262,18 +285,31 @@ pub mod subscriber { /// Publishing primitives pub mod publication { + pub use crate::api::builders::publication::PublicationBuilderDelete; + pub use crate::api::builders::publication::PublicationBuilderPut; pub use crate::api::builders::publication::PublisherBuilder; + pub use crate::api::builders::publication::PublisherDeleteBuilder; #[zenoh_macros::unstable] pub use 
crate::api::publication::MatchingListener; + #[zenoh_macros::unstable] + pub use crate::api::publication::MatchingListenerBuilder; + #[zenoh_macros::unstable] + pub use crate::api::publication::MatchingListenerUndeclaration; + #[zenoh_macros::unstable] + pub use crate::api::publication::MatchingStatus; pub use crate::api::publication::Priority; pub use crate::api::publication::Publisher; #[zenoh_macros::unstable] pub use crate::api::publication::PublisherDeclarations; + #[zenoh_macros::unstable] + pub use crate::api::publication::PublisherRef; + pub use crate::api::publication::PublisherUndeclaration; pub use zenoh_protocol::core::CongestionControl; } /// Query primitives pub mod query { + pub use crate::api::query::GetBuilder; pub use crate::api::query::Reply; #[zenoh_macros::unstable] pub use crate::api::query::ReplyKeyExpr; @@ -287,19 +323,31 @@ pub mod queryable { pub use crate::api::queryable::Query; pub use crate::api::queryable::Queryable; pub use crate::api::queryable::QueryableBuilder; + pub use crate::api::queryable::QueryableUndeclaration; + pub use crate::api::queryable::ReplyBuilder; + pub use crate::api::queryable::ReplyBuilderDelete; + pub use crate::api::queryable::ReplyBuilderPut; + pub use crate::api::queryable::ReplyErrBuilder; + #[zenoh_macros::unstable] + pub use crate::api::queryable::ReplySample; } /// Callback handler trait pub mod handlers { pub use crate::api::handlers::locked; + pub use crate::api::handlers::Callback; + pub use crate::api::handlers::CallbackDrop; pub use crate::api::handlers::DefaultHandler; + pub use crate::api::handlers::FifoChannel; pub use crate::api::handlers::IntoHandler; pub use crate::api::handlers::RingChannel; + pub use crate::api::handlers::RingChannelHandler; } /// Scouting primitives pub mod scouting { pub use crate::api::scouting::scout; + pub use crate::api::scouting::Scout; pub use crate::api::scouting::ScoutBuilder; /// Constants and helpers for zenoh `whatami` flags. pub use zenoh_protocol::core::WhatAmI; @@ -311,8 +359,11 @@ pub mod scouting { #[cfg(feature = "unstable")] pub mod liveliness { pub use crate::api::liveliness::Liveliness; + pub use crate::api::liveliness::LivelinessGetBuilder; pub use crate::api::liveliness::LivelinessSubscriberBuilder; pub use crate::api::liveliness::LivelinessToken; + pub use crate::api::liveliness::LivelinessTokenBuilder; + pub use crate::api::liveliness::LivelinessTokenUndeclaration; } /// Timestamp support @@ -345,6 +396,7 @@ pub mod plugins { pub use crate::api::plugins::PluginsManager; pub use crate::api::plugins::Response; pub use crate::api::plugins::RunningPlugin; + pub use crate::api::plugins::PLUGIN_PREFIX; pub use crate::api::plugins::{RunningPluginTrait, ZenohPlugin}; } From c3f993da4baf385adb04d17f911790ca0becec41 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Mon, 29 Apr 2024 18:44:09 +0200 Subject: [PATCH 309/357] Add attachment_mut to Sample --- zenoh/src/api/sample.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 365c2a7728..11bfc92c0b 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -355,6 +355,13 @@ impl Sample { pub fn attachment(&self) -> Option<&ZBytes> { self.attachment.as_ref() } + + /// Gets the sample attachment: a map of key-value pairs, where each key and value are byte-slices. 
+ #[zenoh_macros::unstable] + #[inline] + pub fn attachment_mut(&mut self) -> Option<&mut ZBytes> { + self.attachment.as_mut() + } } impl From for Value { From d4218fccddf6e5b52539082dd823a19817b3ef44 Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Tue, 30 Apr 2024 13:04:05 +0300 Subject: [PATCH 310/357] [skip ci] fix SHM exports in new api export mechanism --- zenoh/src/lib.rs | 39 ++++++++++++++++++++++++++------------- zenoh/src/prelude.rs | 4 ++-- 2 files changed, 28 insertions(+), 15 deletions(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 2a238ea875..b8be3d905c 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -362,19 +362,32 @@ pub mod internal { #[cfg(all(feature = "unstable", feature = "shared-memory"))] pub mod shm { - pub use zenoh_shm::api::client_storage::SharedMemoryClientStorage; - pub use zenoh_shm::api::provider::shared_memory_provider::{BlockOn, GarbageCollect}; - pub use zenoh_shm::api::provider::shared_memory_provider::{Deallocate, Defragment}; - pub use zenoh_shm::api::provider::types::AllocAlignment; - pub use zenoh_shm::api::provider::types::MemoryLayout; - pub use zenoh_shm::api::slice::zsliceshm::{zsliceshm, ZSliceShm}; - pub use zenoh_shm::api::slice::zsliceshmmut::{zsliceshmmut, ZSliceShmMut}; - pub use zenoh_shm::api::{ - protocol_implementations::posix::{ - posix_shared_memory_client::PosixSharedMemoryClient, - posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, - protocol_id::POSIX_PROTOCOL_ID, + pub use zenoh_shm::api::client::{ + shared_memory_client::SharedMemoryClient, shared_memory_segment::SharedMemorySegment, + }; + pub use zenoh_shm::api::client_storage::{SharedMemoryClientStorage, GLOBAL_CLIENT_STORAGE}; + pub use zenoh_shm::api::common::types::{ChunkID, ProtocolID, SegmentID}; + pub use zenoh_shm::api::protocol_implementations::posix::{ + posix_shared_memory_client::PosixSharedMemoryClient, + posix_shared_memory_provider_backend::{ + LayoutedPosixSharedMemoryProviderBackendBuilder, PosixSharedMemoryProviderBackend, + PosixSharedMemoryProviderBackendBuilder, }, - provider::shared_memory_provider::SharedMemoryProviderBuilder, + protocol_id::POSIX_PROTOCOL_ID, + }; + pub use zenoh_shm::api::provider::shared_memory_provider::{ + AllocBuilder, AllocLayout, AllocLayoutAlignedBuilder, AllocLayoutBuilder, + AllocLayoutSizedBuilder, AllocPolicy, AsyncAllocPolicy, BlockOn, DeallocEldest, + DeallocOptimal, DeallocYoungest, Deallocate, Defragment, DynamicProtocolID, + ForceDeallocPolicy, GarbageCollect, JustAlloc, ProtocolIDSource, SharedMemoryProvider, + SharedMemoryProviderBuilder, SharedMemoryProviderBuilderBackendID, + SharedMemoryProviderBuilderID, StaticProtocolID, + }; + pub use zenoh_shm::api::provider::types::{ + AllocAlignment, BufAllocResult, ChunkAllocResult, MemoryLayout, ZAllocError, + }; + pub use zenoh_shm::api::slice::{ + zsliceshm::{zsliceshm, ZSliceShm}, + zsliceshmmut::{zsliceshmmut, ZSliceShmMut}, }; } diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 17286ddeea..ac60f16c89 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -51,7 +51,7 @@ pub(crate) mod flat { pub use crate::scouting::*; pub use crate::selector::*; pub use crate::session::*; - #[cfg(feature = "shared-memory")] + #[cfg(all(feature = "unstable", feature = "shared-memory"))] pub use crate::shm::*; pub use crate::subscriber::*; pub use crate::time::*; @@ -74,7 +74,7 @@ pub(crate) mod mods { pub use crate::scouting; pub use crate::selector; pub use crate::session; - #[cfg(feature = "shared-memory")] + 
#[cfg(all(feature = "unstable", feature = "shared-memory"))] pub use crate::shm; pub use crate::subscriber; pub use crate::time; From 3c6327727997cbea4a02a5046f00e9a1e06e0c3f Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 30 Apr 2024 16:02:09 +0200 Subject: [PATCH 311/357] Merge main in protocol changes (#997) * fix(zenoh_runtime): disable atexit on windows (#981) * Revert "fix(zenoh-runtime): zenoh-c DLL crash in `libc::atexit` handler (#972)" This reverts commit 274166d778945be0bb9250944f1374e3c0dfc892. * ci: disable atexit cleanup on Windows * fix: Deny publishing of zenoh-ext-examples (#984) * Fix runtime start calling (#985) * Fix invalid JSON in admin space for static plugins (#988) * Fix admin space: plugins __path__ was invalid JSON (#990) * fix(test): sporadic failures of downsampling test on Windows (#995) * fix: set the minimal sleep interval to 17ms on windows * fixup! fix: set the minimal sleep interval to 17ms on windows * fixup! fix: set the minimal sleep interval to 17ms on windows * Acl fix (#993) * ACL does not intercept messages with no key_expr * Update DEFAULT_CONFIG.json5 --------- Co-authored-by: Yuyuan Yuan Co-authored-by: Mahmoud Mazouz Co-authored-by: Julien Enoch Co-authored-by: OlivierHecart --- DEFAULT_CONFIG.json5 | 2 ++ commons/zenoh-runtime/Cargo.toml | 12 ++----- commons/zenoh-runtime/src/lib.rs | 33 +++-------------- .../tests/operations.rs | 5 ++- .../tests/wildcard.rs | 5 ++- .../src/manager/dynamic_plugin.rs | 2 +- .../src/manager/static_plugin.rs | 2 +- zenoh-ext/examples/Cargo.toml | 1 + zenoh/src/api/session.rs | 19 ++++++---- .../net/routing/interceptor/access_control.rs | 20 +++++------ zenoh/src/net/runtime/adminspace.rs | 9 ++++- zenoh/src/net/runtime/mod.rs | 36 +++---------------- zenoh/src/net/runtime/orchestrator.rs | 2 +- zenoh/tests/interceptors.rs | 9 +++-- zenoh/tests/session.rs | 8 +++-- 15 files changed, 68 insertions(+), 97 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index bd3bbbaf6b..ec9a827777 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -175,6 +175,7 @@ // ], // }, // ], + // /// configure access control (ACL) rules // access_control: { // ///[true/false] acl will be activated only if this is set to true @@ -199,6 +200,7 @@ // }, // ] //}, + /// Configure internal transport parameters transport: { unicast: { diff --git a/commons/zenoh-runtime/Cargo.toml b/commons/zenoh-runtime/Cargo.toml index e3a08a9de8..3625e5036f 100644 --- a/commons/zenoh-runtime/Cargo.toml +++ b/commons/zenoh-runtime/Cargo.toml @@ -18,17 +18,9 @@ ron = { workspace = true } serde = { workspace = true } futures = { workspace = true } lazy_static = { workspace = true } +tokio = { workspace = true, features = ["fs", "io-util", "macros", "net", "rt-multi-thread", "sync", "time"] } +tracing = { workspace = true } zenoh-result = { workspace = true, features = ["std"] } zenoh-protocol = { workspace = true } zenoh-collections = { workspace = true, features = ["std"] } zenoh-macros = { workspace = true } -tokio = { workspace = true, features = [ - "fs", - "io-util", - "macros", - "net", - "rt-multi-thread", - "sync", - "time", -] } -tracing = { workspace = true } diff --git a/commons/zenoh-runtime/src/lib.rs b/commons/zenoh-runtime/src/lib.rs index cb58cac570..dcd46744e6 100644 --- a/commons/zenoh-runtime/src/lib.rs +++ b/commons/zenoh-runtime/src/lib.rs @@ -157,6 +157,8 @@ pub struct ZRuntimePool(HashMap>); impl ZRuntimePool { fn new() -> Self { + // It has been recognized that using atexit within Windows DLL is 
problematic + #[cfg(not(target_os = "windows"))] // Register a callback to clean the static variables. unsafe { libc::atexit(cleanup); @@ -184,42 +186,17 @@ impl ZRuntimePool { // If there are any blocking tasks spawned by ZRuntimes, the function will block until they return. impl Drop for ZRuntimePool { fn drop(&mut self) { - std::panic::set_hook(Box::new(|_| { - // To suppress the panic error caught in the following `catch_unwind`. - })); - let handles: Vec<_> = self .0 .drain() .filter_map(|(_name, mut rt)| { - rt.take().map(|r| { - // NOTE: The error of the atexit handler in DLL (static lib is fine) - // failing to spawn a new thread in `cleanup` has been identified. - std::panic::catch_unwind(|| { - std::thread::spawn(move || r.shutdown_timeout(Duration::from_secs(1))) - }) - }) + rt.take() + .map(|r| std::thread::spawn(move || r.shutdown_timeout(Duration::from_secs(1)))) }) .collect(); for hd in handles { - match hd { - Ok(handle) => { - if let Err(err) = handle.join() { - tracing::error!( - "The handle failed to join during `ZRuntimePool` drop due to {err:?}" - ); - } - } - Err(err) => { - // WARN: Windows with DLL is expected to panic for the time being. - // Otherwise, report the error. - #[cfg(not(target_os = "windows"))] - tracing::error!("`ZRuntimePool` failed to drop due to {err:?}"); - #[cfg(target_os = "windows")] - tracing::trace!("`ZRuntimePool` failed to drop due to {err:?}"); - } - } + let _ = hd.join(); } } } diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index b5384e13be..61ea53deba 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -70,7 +70,10 @@ async fn test_updates_in_order() { ) .unwrap(); - let runtime = zenoh::runtime::Runtime::new(config).await.unwrap(); + let runtime = zenoh::runtime::RuntimeBuilder::new(config) + .build() + .await + .unwrap(); let storage = zenoh_plugin_storage_manager::StoragesPlugin::start("storage-manager", &runtime).unwrap(); diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index bd38e834d7..f2482da8e5 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -71,7 +71,10 @@ async fn test_wild_card_in_order() { ) .unwrap(); - let runtime = zenoh::runtime::Runtime::new(config).await.unwrap(); + let runtime = zenoh::runtime::RuntimeBuilder::new(config) + .build() + .await + .unwrap(); let storage = zenoh_plugin_storage_manager::StoragesPlugin::start("storage-manager", &runtime).unwrap(); diff --git a/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs b/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs index 90008aad36..a8a78306ea 100644 --- a/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs +++ b/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs @@ -142,7 +142,7 @@ impl PluginStatus if let Some(starter) = &self.starter { starter.path() } else { - "" + "__not_loaded__" } } fn state(&self) -> PluginState { diff --git a/plugins/zenoh-plugin-trait/src/manager/static_plugin.rs b/plugins/zenoh-plugin-trait/src/manager/static_plugin.rs index 6d1bcae278..c275fb9818 100644 --- a/plugins/zenoh-plugin-trait/src/manager/static_plugin.rs +++ b/plugins/zenoh-plugin-trait/src/manager/static_plugin.rs @@ -51,7 +51,7 @@ where Some(P::PLUGIN_LONG_VERSION) } fn path(&self) -> &str { - "" + "__static_lib__" } fn 
state(&self) -> PluginState { self.instance diff --git a/zenoh-ext/examples/Cargo.toml b/zenoh-ext/examples/Cargo.toml index 3493016835..9cca8848ff 100644 --- a/zenoh-ext/examples/Cargo.toml +++ b/zenoh-ext/examples/Cargo.toml @@ -22,6 +22,7 @@ edition = { workspace = true } license = { workspace = true } categories = { workspace = true } description = "Internal crate for zenoh" +publish = false [badges] maintenance = { status = "actively-developed" } diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index dea322419c..703fca2e9d 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -31,7 +31,11 @@ use super::{ value::Value, Id, }; -use crate::net::{primitives::Primitives, routing::dispatcher::face::Face, runtime::Runtime}; +use crate::net::{ + primitives::Primitives, + routing::dispatcher::face::Face, + runtime::{Runtime, RuntimeBuilder}, +}; use std::future::IntoFuture; use std::{ collections::HashMap, @@ -842,12 +846,13 @@ impl Session { tracing::debug!("Config: {:?}", &config); let aggregated_subscribers = config.aggregation().subscribers().clone(); let aggregated_publishers = config.aggregation().publishers().clone(); - let mut runtime = Runtime::init( - config, - #[cfg(all(feature = "unstable", feature = "shared-memory"))] - shm_clients, - ) - .await?; + #[allow(unused_mut)] // Required for shared-memory + let mut runtime = RuntimeBuilder::new(config); + #[cfg(all(feature = "unstable", feature = "shared-memory"))] + { + runtime = runtime.shm_clients(shm_clients); + } + let mut runtime = runtime.build().await?; let mut session = Self::init( runtime.clone(), diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index b23db9765e..102e30a0df 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -162,14 +162,14 @@ impl InterceptorTrait for IngressAclEnforcer { None } }) - .or_else(|| ctx.full_expr())?; + .or_else(|| ctx.full_expr()); match &ctx.msg.body { NetworkBody::Push(Push { payload: PushBody::Put(_), .. }) => { - if self.action(Action::Put, "Put (ingress)", key_expr) == Permission::Deny { + if self.action(Action::Put, "Put (ingress)", key_expr?) == Permission::Deny { return None; } } @@ -177,7 +177,7 @@ impl InterceptorTrait for IngressAclEnforcer { payload: RequestBody::Query(_), .. }) => { - if self.action(Action::Get, "Get (ingress)", key_expr) == Permission::Deny { + if self.action(Action::Get, "Get (ingress)", key_expr?) == Permission::Deny { return None; } } @@ -188,7 +188,7 @@ impl InterceptorTrait for IngressAclEnforcer { if self.action( Action::DeclareSubscriber, "Declare Subscriber (ingress)", - key_expr, + key_expr?, ) == Permission::Deny { return None; @@ -201,7 +201,7 @@ impl InterceptorTrait for IngressAclEnforcer { if self.action( Action::DeclareQueryable, "Declare Queryable (ingress)", - key_expr, + key_expr?, ) == Permission::Deny { return None; @@ -230,14 +230,14 @@ impl InterceptorTrait for EgressAclEnforcer { None } }) - .or_else(|| ctx.full_expr())?; + .or_else(|| ctx.full_expr()); match &ctx.msg.body { NetworkBody::Push(Push { payload: PushBody::Put(_), .. }) => { - if self.action(Action::Put, "Put (egress)", key_expr) == Permission::Deny { + if self.action(Action::Put, "Put (egress)", key_expr?) == Permission::Deny { return None; } } @@ -245,7 +245,7 @@ impl InterceptorTrait for EgressAclEnforcer { payload: RequestBody::Query(_), .. 
}) => { - if self.action(Action::Get, "Get (egress)", key_expr) == Permission::Deny { + if self.action(Action::Get, "Get (egress)", key_expr?) == Permission::Deny { return None; } } @@ -256,7 +256,7 @@ impl InterceptorTrait for EgressAclEnforcer { if self.action( Action::DeclareSubscriber, "Declare Subscriber (egress)", - key_expr, + key_expr?, ) == Permission::Deny { return None; @@ -269,7 +269,7 @@ impl InterceptorTrait for EgressAclEnforcer { if self.action( Action::DeclareQueryable, "Declare Queryable (egress)", - key_expr, + key_expr?, ) == Permission::Deny { return None; diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 9ea54b8d88..3f2e0b488f 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -793,7 +793,14 @@ fn plugins_status(context: &AdminContext, query: Query) { with_extended_string(plugin_key, &["/__path__"], |plugin_path_key| { if let Ok(key_expr) = KeyExpr::try_from(plugin_path_key.clone()) { if query.key_expr().intersects(&key_expr) { - if let Err(e) = query.reply(key_expr, plugin.path()).wait() { + if let Err(e) = query + .reply( + key_expr, + serde_json::to_string(plugin.path()) + .unwrap_or_else(|_| String::from("{}")), + ) + .wait() + { tracing::error!("Error sending AdminSpace reply: {:?}", e); } } diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 4991844650..f1cf4d95d2 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -184,6 +184,11 @@ impl RuntimeBuilder { *handler.runtime.write().unwrap() = Runtime::downgrade(&runtime); get_mut_unchecked(&mut runtime.state.router.clone()).init_link_state(runtime.clone()); + // Admin space + if start_admin_space { + AdminSpace::start(&runtime, LONG_VERSION.clone()).await; + } + // Start plugins #[cfg(all(feature = "unstable", feature = "plugins"))] start_plugins(&runtime); @@ -215,11 +220,6 @@ impl RuntimeBuilder { } }); - // Admin space - if start_admin_space { - AdminSpace::start(&runtime, LONG_VERSION.clone()).await; - } - Ok(runtime) } } @@ -241,32 +241,6 @@ impl StructVersion for Runtime { impl PluginStartArgs for Runtime {} impl Runtime { - pub async fn new(config: Config) -> ZResult { - // Create plugin_manager and load plugins - let mut runtime = Runtime::init( - config, - #[cfg(all(feature = "unstable", feature = "shared-memory"))] - None, - ) - .await?; - match runtime.start().await { - Ok(()) => Ok(runtime), - Err(err) => Err(err), - } - } - - pub(crate) async fn init( - config: Config, - #[cfg(all(feature = "unstable", feature = "shared-memory"))] shm_clients: Option< - Arc, - >, - ) -> ZResult { - let builder = RuntimeBuilder::new(config); - #[cfg(all(feature = "unstable", feature = "shared-memory"))] - let builder = builder.shm_clients(shm_clients); - builder.build().await - } - #[inline(always)] pub(crate) fn manager(&self) -> &TransportManager { &self.state.manager diff --git a/zenoh/src/net/runtime/orchestrator.rs b/zenoh/src/net/runtime/orchestrator.rs index 687fa90649..c2c7ecedd2 100644 --- a/zenoh/src/net/runtime/orchestrator.rs +++ b/zenoh/src/net/runtime/orchestrator.rs @@ -43,7 +43,7 @@ pub enum Loop { } impl Runtime { - pub(crate) async fn start(&mut self) -> ZResult<()> { + pub async fn start(&mut self) -> ZResult<()> { match self.whatami() { WhatAmI::Client => self.start_client().await, WhatAmI::Peer => self.start_peer().await, diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index f6e876d92e..37f193630d 100644 --- a/zenoh/tests/interceptors.rs 
+++ b/zenoh/tests/interceptors.rs @@ -15,6 +15,11 @@ use std::sync::{Arc, Mutex}; use zenoh::internal::zlock; use zenoh::prelude::*; +#[cfg(target_os = "windows")] +static MINIMAL_SLEEP_INTERVAL_MS: u64 = 17; +#[cfg(not(target_os = "windows"))] +static MINIMAL_SLEEP_INTERVAL_MS: u64 = 2; + struct IntervalCounter { first_tick: bool, last_time: std::time::Instant, @@ -143,7 +148,7 @@ fn downsampling_by_keyexpr_impl(egress: bool) { .unwrap(); // WARN(yuyuan): 2 ms is the limit of tokio - let interval = std::time::Duration::from_millis(2); + let interval = std::time::Duration::from_millis(MINIMAL_SLEEP_INTERVAL_MS); let messages_count = 1000; for i in 0..messages_count { publisher_r100.put(format!("message {}", i)).wait().unwrap(); @@ -245,7 +250,7 @@ fn downsampling_by_interface_impl(egress: bool) { .unwrap(); // WARN(yuyuan): 2 ms is the limit of tokio - let interval = std::time::Duration::from_millis(2); + let interval = std::time::Duration::from_millis(MINIMAL_SLEEP_INTERVAL_MS); let messages_count = 1000; for i in 0..messages_count { publisher_r100.put(format!("message {}", i)).wait().unwrap(); diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index b52dbb90b8..43dfc79470 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -17,7 +17,7 @@ use std::time::Duration; use zenoh::internal::ztimeout; use zenoh::prelude::*; #[cfg(feature = "unstable")] -use zenoh::runtime::Runtime; +use zenoh::runtime::{Runtime, RuntimeBuilder}; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); @@ -264,7 +264,8 @@ async fn open_session_unicast_runtime(endpoints: &[&str]) -> (Runtime, Runtime) .collect::>(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][01a] Creating r1 session runtime: {:?}", endpoints); - let r1 = Runtime::new(config).await.unwrap(); + let mut r1 = RuntimeBuilder::new(config).build().await.unwrap(); + r1.start().await.unwrap(); let mut config = config::peer(); config.connect.endpoints = endpoints @@ -273,7 +274,8 @@ async fn open_session_unicast_runtime(endpoints: &[&str]) -> (Runtime, Runtime) .collect::>(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][02a] Creating r2 session runtime: {:?}", endpoints); - let r2 = Runtime::new(config).await.unwrap(); + let mut r2 = RuntimeBuilder::new(config).build().await.unwrap(); + r2.start().await.unwrap(); (r1, r2) } From 8fe5ab5852fdaa18a88f450d4531d9fbbfbc7531 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 30 Apr 2024 16:02:50 +0200 Subject: [PATCH 312/357] Rename ZidBuilder to ZenohIdBuilder (#994) --- zenoh/src/api/info.rs | 36 ++++++++++++++++++------------------ zenoh/src/lib.rs | 6 +++--- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/zenoh/src/api/info.rs b/zenoh/src/api/info.rs index a6f8ff1629..205a412142 100644 --- a/zenoh/src/api/info.rs +++ b/zenoh/src/api/info.rs @@ -33,21 +33,21 @@ use zenoh_protocol::core::{WhatAmI, ZenohId}; /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] -pub struct ZidBuilder<'a> { +pub struct ZenohIdBuilder<'a> { pub(crate) session: SessionRef<'a>, } -impl<'a> Resolvable for ZidBuilder<'a> { +impl<'a> Resolvable for ZenohIdBuilder<'a> { type To = ZenohId; } -impl<'a> Wait for ZidBuilder<'a> { +impl<'a> Wait for ZenohIdBuilder<'a> { fn wait(self) -> Self::To { self.session.runtime.zid() } } -impl<'a> IntoFuture for ZidBuilder<'a> { +impl<'a> IntoFuture for 
ZenohIdBuilder<'a> { type Output = ::To; type IntoFuture = Ready<::To>; @@ -73,15 +73,15 @@ impl<'a> IntoFuture for ZidBuilder<'a> { /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] -pub struct RoutersZidBuilder<'a> { +pub struct RoutersZenohIdBuilder<'a> { pub(crate) session: SessionRef<'a>, } -impl<'a> Resolvable for RoutersZidBuilder<'a> { +impl<'a> Resolvable for RoutersZenohIdBuilder<'a> { type To = Box + Send + Sync>; } -impl<'a> Wait for RoutersZidBuilder<'a> { +impl<'a> Wait for RoutersZenohIdBuilder<'a> { fn wait(self) -> Self::To { Box::new( zenoh_runtime::ZRuntime::Application @@ -97,7 +97,7 @@ impl<'a> Wait for RoutersZidBuilder<'a> { } } -impl<'a> IntoFuture for RoutersZidBuilder<'a> { +impl<'a> IntoFuture for RoutersZenohIdBuilder<'a> { type Output = ::To; type IntoFuture = Ready<::To>; @@ -123,15 +123,15 @@ impl<'a> IntoFuture for RoutersZidBuilder<'a> { /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] -pub struct PeersZidBuilder<'a> { +pub struct PeersZenohIdBuilder<'a> { pub(crate) session: SessionRef<'a>, } -impl<'a> Resolvable for PeersZidBuilder<'a> { +impl<'a> Resolvable for PeersZenohIdBuilder<'a> { type To = Box + Send + Sync>; } -impl<'a> Wait for PeersZidBuilder<'a> { +impl<'a> Wait for PeersZenohIdBuilder<'a> { fn wait(self) -> ::To { Box::new( zenoh_runtime::ZRuntime::Application @@ -147,7 +147,7 @@ impl<'a> Wait for PeersZidBuilder<'a> { } } -impl<'a> IntoFuture for PeersZidBuilder<'a> { +impl<'a> IntoFuture for PeersZenohIdBuilder<'a> { type Output = ::To; type IntoFuture = Ready<::To>; @@ -187,8 +187,8 @@ impl SessionInfo<'_> { /// let zid = session.info().zid().await; /// # } /// ``` - pub fn zid(&self) -> ZidBuilder<'_> { - ZidBuilder { + pub fn zid(&self) -> ZenohIdBuilder<'_> { + ZenohIdBuilder { session: self.session.clone(), } } @@ -207,8 +207,8 @@ impl SessionInfo<'_> { /// while let Some(router_zid) = routers_zid.next() {} /// # } /// ``` - pub fn routers_zid(&self) -> RoutersZidBuilder<'_> { - RoutersZidBuilder { + pub fn routers_zid(&self) -> RoutersZenohIdBuilder<'_> { + RoutersZenohIdBuilder { session: self.session.clone(), } } @@ -226,8 +226,8 @@ impl SessionInfo<'_> { /// while let Some(peer_zid) = peers_zid.next() {} /// # } /// ``` - pub fn peers_zid(&self) -> PeersZidBuilder<'_> { - PeersZidBuilder { + pub fn peers_zid(&self) -> PeersZenohIdBuilder<'_> { + PeersZenohIdBuilder { session: self.session.clone(), } } diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index c4247b73da..71ab3a72e8 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -215,10 +215,10 @@ pub mod session { /// Tools to access information about the current zenoh [`Session`](crate::Session). 
pub mod info { - pub use crate::api::info::PeersZidBuilder; - pub use crate::api::info::RoutersZidBuilder; + pub use crate::api::info::PeersZenohIdBuilder; + pub use crate::api::info::RoutersZenohIdBuilder; pub use crate::api::info::SessionInfo; - pub use crate::api::info::ZidBuilder; + pub use crate::api::info::ZenohIdBuilder; } /// Sample primitives From bca2fd74ca5bdc132e194ef62d56429580d852bc Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Tue, 30 Apr 2024 18:10:38 +0300 Subject: [PATCH 313/357] Massive renaming for ZSliceShm and ZSliceShmMut --- .../src/api/{slice => buffer}/mod.rs | 4 +- .../src/api/{slice => buffer}/traits.rs | 0 .../{slice/zsliceshm.rs => buffer/zshm.rs} | 76 ++++++++-------- .../zsliceshmmut.rs => buffer/zshmmut.rs} | 86 +++++++++---------- commons/zenoh-shm/src/api/mod.rs | 2 +- .../api/provider/shared_memory_provider.rs | 12 +-- commons/zenoh-shm/src/api/provider/types.rs | 4 +- examples/examples/z_get_shm.rs | 2 +- examples/examples/z_payload_shm.rs | 36 ++++---- examples/examples/z_ping_shm.rs | 2 +- examples/examples/z_queryable_shm.rs | 2 +- examples/examples/z_sub_shm.rs | 6 +- zenoh/src/api/bytes.rs | 48 +++++------ zenoh/src/api/encoding.rs | 6 +- zenoh/src/lib.rs | 8 +- zenoh/tests/bytes.rs | 32 +++---- 16 files changed, 163 insertions(+), 163 deletions(-) rename commons/zenoh-shm/src/api/{slice => buffer}/mod.rs (92%) rename commons/zenoh-shm/src/api/{slice => buffer}/traits.rs (100%) rename commons/zenoh-shm/src/api/{slice/zsliceshm.rs => buffer/zshm.rs} (59%) rename commons/zenoh-shm/src/api/{slice/zsliceshmmut.rs => buffer/zshmmut.rs} (59%) diff --git a/commons/zenoh-shm/src/api/slice/mod.rs b/commons/zenoh-shm/src/api/buffer/mod.rs similarity index 92% rename from commons/zenoh-shm/src/api/slice/mod.rs rename to commons/zenoh-shm/src/api/buffer/mod.rs index 59c793f94a..8a3e040da9 100644 --- a/commons/zenoh-shm/src/api/slice/mod.rs +++ b/commons/zenoh-shm/src/api/buffer/mod.rs @@ -13,5 +13,5 @@ // pub mod traits; -pub mod zsliceshm; -pub mod zsliceshmmut; +pub mod zshm; +pub mod zshmmut; diff --git a/commons/zenoh-shm/src/api/slice/traits.rs b/commons/zenoh-shm/src/api/buffer/traits.rs similarity index 100% rename from commons/zenoh-shm/src/api/slice/traits.rs rename to commons/zenoh-shm/src/api/buffer/traits.rs diff --git a/commons/zenoh-shm/src/api/slice/zsliceshm.rs b/commons/zenoh-shm/src/api/buffer/zshm.rs similarity index 59% rename from commons/zenoh-shm/src/api/slice/zsliceshm.rs rename to commons/zenoh-shm/src/api/buffer/zshm.rs index 86f4395ebb..e7cf2a3197 100644 --- a/commons/zenoh-shm/src/api/slice/zsliceshm.rs +++ b/commons/zenoh-shm/src/api/buffer/zshm.rs @@ -22,43 +22,43 @@ use zenoh_buffers::{ZBuf, ZSlice}; use crate::SharedMemoryBuf; -use super::{traits::SHMBuf, zsliceshmmut::zsliceshmmut}; +use super::{traits::SHMBuf, zshmmut::zshmmut}; -/// An immutable SHM slice +/// An immutable SHM buffer #[zenoh_macros::unstable_doc] #[repr(transparent)] #[derive(Clone, Debug, PartialEq, Eq)] -pub struct ZSliceShm(pub(crate) SharedMemoryBuf); +pub struct ZShm(pub(crate) SharedMemoryBuf); -impl SHMBuf for ZSliceShm { +impl SHMBuf for ZShm { fn is_valid(&self) -> bool { self.0.is_valid() } } -impl PartialEq<&zsliceshm> for ZSliceShm { - fn eq(&self, other: &&zsliceshm) -> bool { +impl PartialEq<&zshm> for ZShm { + fn eq(&self, other: &&zshm) -> bool { self.0 == other.0 .0 } } -impl Borrow for ZSliceShm { - fn borrow(&self) -> &zsliceshm { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] +impl Borrow for 
ZShm { + fn borrow(&self) -> &zshm { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } -impl BorrowMut for ZSliceShm { - fn borrow_mut(&mut self) -> &mut zsliceshm { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] +impl BorrowMut for ZShm { + fn borrow_mut(&mut self) -> &mut zshm { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } -impl Deref for ZSliceShm { +impl Deref for ZShm { type Target = [u8]; fn deref(&self) -> &Self::Target { @@ -66,37 +66,37 @@ impl Deref for ZSliceShm { } } -impl AsRef<[u8]> for ZSliceShm { +impl AsRef<[u8]> for ZShm { fn as_ref(&self) -> &[u8] { self } } -impl From for ZSliceShm { +impl From for ZShm { fn from(value: SharedMemoryBuf) -> Self { Self(value) } } -impl From for ZSlice { - fn from(value: ZSliceShm) -> Self { +impl From for ZSlice { + fn from(value: ZShm) -> Self { value.0.into() } } -impl From for ZBuf { - fn from(value: ZSliceShm) -> Self { +impl From for ZBuf { + fn from(value: ZShm) -> Self { value.0.into() } } -impl TryFrom<&mut ZSliceShm> for &mut zsliceshmmut { +impl TryFrom<&mut ZShm> for &mut zshmmut { type Error = (); - fn try_from(value: &mut ZSliceShm) -> Result { + fn try_from(value: &mut ZShm) -> Result { match value.0.is_unique() && value.0.is_valid() { true => { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction Ok(unsafe { core::mem::transmute(value) }) } @@ -105,64 +105,64 @@ impl TryFrom<&mut ZSliceShm> for &mut zsliceshmmut { } } -/// A borrowed immutable SHM slice +/// A borrowed immutable SHM buffer #[zenoh_macros::unstable_doc] #[derive(Debug, PartialEq, Eq)] #[allow(non_camel_case_types)] #[repr(transparent)] -pub struct zsliceshm(ZSliceShm); +pub struct zshm(ZShm); -impl ToOwned for zsliceshm { - type Owned = ZSliceShm; +impl ToOwned for zshm { + type Owned = ZShm; fn to_owned(&self) -> Self::Owned { self.0.clone() } } -impl PartialEq for &zsliceshm { - fn eq(&self, other: &ZSliceShm) -> bool { +impl PartialEq for &zshm { + fn eq(&self, other: &ZShm) -> bool { self.0 .0 == other.0 } } -impl Deref for zsliceshm { - type Target = ZSliceShm; +impl Deref for zshm { + type Target = ZShm; fn deref(&self) -> &Self::Target { &self.0 } } -impl DerefMut for zsliceshm { +impl DerefMut for zshm { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } -impl From<&SharedMemoryBuf> for &zsliceshm { +impl From<&SharedMemoryBuf> for &zshm { fn from(value: &SharedMemoryBuf) -> Self { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(value) } } } -impl From<&mut SharedMemoryBuf> for &mut zsliceshm { +impl From<&mut SharedMemoryBuf> for &mut zshm { fn from(value: &mut SharedMemoryBuf) -> Self { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction 
unsafe { core::mem::transmute(value) } } } -impl TryFrom<&mut zsliceshm> for &mut zsliceshmmut { +impl TryFrom<&mut zshm> for &mut zshmmut { type Error = (); - fn try_from(value: &mut zsliceshm) -> Result { + fn try_from(value: &mut zshm) -> Result { match value.0 .0.is_unique() && value.0 .0.is_valid() { true => { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction Ok(unsafe { core::mem::transmute(value) }) } diff --git a/commons/zenoh-shm/src/api/slice/zsliceshmmut.rs b/commons/zenoh-shm/src/api/buffer/zshmmut.rs similarity index 59% rename from commons/zenoh-shm/src/api/slice/zsliceshmmut.rs rename to commons/zenoh-shm/src/api/buffer/zshmmut.rs index 62823785da..e40c9c77f1 100644 --- a/commons/zenoh-shm/src/api/slice/zsliceshmmut.rs +++ b/commons/zenoh-shm/src/api/buffer/zshmmut.rs @@ -21,36 +21,36 @@ use crate::SharedMemoryBuf; use super::{ traits::{SHMBuf, SHMBufMut}, - zsliceshm::{zsliceshm, ZSliceShm}, + zshm::{zshm, ZShm}, }; -/// A mutable SHM slice +/// A mutable SHM buffer #[zenoh_macros::unstable_doc] #[derive(Debug, PartialEq, Eq)] #[repr(transparent)] -pub struct ZSliceShmMut(SharedMemoryBuf); +pub struct ZShmMut(SharedMemoryBuf); -impl SHMBuf for ZSliceShmMut { +impl SHMBuf for ZShmMut { fn is_valid(&self) -> bool { self.0.is_valid() } } -impl SHMBufMut for ZSliceShmMut {} +impl SHMBufMut for ZShmMut {} -impl ZSliceShmMut { +impl ZShmMut { pub(crate) unsafe fn new_unchecked(data: SharedMemoryBuf) -> Self { Self(data) } } -impl PartialEq for &ZSliceShmMut { - fn eq(&self, other: &zsliceshmmut) -> bool { +impl PartialEq for &ZShmMut { + fn eq(&self, other: &zshmmut) -> bool { self.0 == other.0 .0 } } -impl TryFrom for ZSliceShmMut { +impl TryFrom for ZShmMut { type Error = SharedMemoryBuf; fn try_from(value: SharedMemoryBuf) -> Result { @@ -61,10 +61,10 @@ impl TryFrom for ZSliceShmMut { } } -impl TryFrom for ZSliceShmMut { - type Error = ZSliceShm; +impl TryFrom for ZShmMut { + type Error = ZShm; - fn try_from(value: ZSliceShm) -> Result { + fn try_from(value: ZShm) -> Result { match value.0.is_unique() && value.0.is_valid() { true => Ok(Self(value.0)), false => Err(value), @@ -72,39 +72,39 @@ impl TryFrom for ZSliceShmMut { } } -impl Borrow for ZSliceShmMut { - fn borrow(&self) -> &zsliceshm { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] +impl Borrow for ZShmMut { + fn borrow(&self) -> &zshm { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } -impl BorrowMut for ZSliceShmMut { - fn borrow_mut(&mut self) -> &mut zsliceshm { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] +impl BorrowMut for ZShmMut { + fn borrow_mut(&mut self) -> &mut zshm { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } -impl Borrow for ZSliceShmMut { - fn borrow(&self) -> &zsliceshmmut { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] +impl Borrow for ZShmMut { + fn borrow(&self) -> &zshmmut { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in 
any direction unsafe { core::mem::transmute(self) } } } -impl BorrowMut for ZSliceShmMut { - fn borrow_mut(&mut self) -> &mut zsliceshmmut { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] +impl BorrowMut for ZShmMut { + fn borrow_mut(&mut self) -> &mut zshmmut { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } -impl Deref for ZSliceShmMut { +impl Deref for ZShmMut { type Target = [u8]; fn deref(&self) -> &Self::Target { @@ -112,75 +112,75 @@ impl Deref for ZSliceShmMut { } } -impl DerefMut for ZSliceShmMut { +impl DerefMut for ZShmMut { fn deref_mut(&mut self) -> &mut Self::Target { self.0.as_mut() } } -impl AsRef<[u8]> for ZSliceShmMut { +impl AsRef<[u8]> for ZShmMut { fn as_ref(&self) -> &[u8] { self } } -impl AsMut<[u8]> for ZSliceShmMut { +impl AsMut<[u8]> for ZShmMut { fn as_mut(&mut self) -> &mut [u8] { self } } -impl From for ZSliceShm { - fn from(value: ZSliceShmMut) -> Self { +impl From for ZShm { + fn from(value: ZShmMut) -> Self { value.0.into() } } -impl From for ZSlice { - fn from(value: ZSliceShmMut) -> Self { +impl From for ZSlice { + fn from(value: ZShmMut) -> Self { value.0.into() } } -impl From for ZBuf { - fn from(value: ZSliceShmMut) -> Self { +impl From for ZBuf { + fn from(value: ZShmMut) -> Self { value.0.into() } } -/// A borrowed mutable SHM slice +/// A borrowed mutable SHM buffer #[zenoh_macros::unstable_doc] #[derive(Debug, PartialEq, Eq)] #[allow(non_camel_case_types)] #[repr(transparent)] -pub struct zsliceshmmut(ZSliceShmMut); +pub struct zshmmut(ZShmMut); -impl PartialEq for &zsliceshmmut { - fn eq(&self, other: &ZSliceShmMut) -> bool { +impl PartialEq for &zshmmut { + fn eq(&self, other: &ZShmMut) -> bool { self.0 .0 == other.0 } } -impl Deref for zsliceshmmut { - type Target = ZSliceShmMut; +impl Deref for zshmmut { + type Target = ZShmMut; fn deref(&self) -> &Self::Target { &self.0 } } -impl DerefMut for zsliceshmmut { +impl DerefMut for zshmmut { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } -impl TryFrom<&mut SharedMemoryBuf> for &mut zsliceshmmut { +impl TryFrom<&mut SharedMemoryBuf> for &mut zshmmut { type Error = (); fn try_from(value: &mut SharedMemoryBuf) -> Result { match value.is_unique() && value.is_valid() { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction true => Ok(unsafe { core::mem::transmute(value) }), false => Err(()), diff --git a/commons/zenoh-shm/src/api/mod.rs b/commons/zenoh-shm/src/api/mod.rs index 08a5678fa8..a87188da29 100644 --- a/commons/zenoh-shm/src/api/mod.rs +++ b/commons/zenoh-shm/src/api/mod.rs @@ -12,9 +12,9 @@ // ZettaScale Zenoh Team, // +pub mod buffer; pub mod client; pub mod client_storage; pub mod common; pub mod protocol_implementations; pub mod provider; -pub mod slice; diff --git a/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs b/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs index c3b8128300..82a4789738 100644 --- a/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs +++ b/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs @@ -23,7 +23,7 @@ use async_trait::async_trait; use zenoh_result::ZResult; use crate::{ - api::{common::types::ProtocolID, slice::zsliceshmmut::ZSliceShmMut}, + 
api::{buffer::zshmmut::ZShmMut, common::types::ProtocolID}, header::{ allocated_descriptor::AllocatedHeaderDescriptor, descriptor::HeaderDescriptor, storage::GLOBAL_HEADER_STORAGE, @@ -713,11 +713,11 @@ where self.backend.defragment() } - /// Map externally-allocated chunk into ZSliceShmMut. + /// Map externally-allocated chunk into ZShmMut. /// This method is designed to be used with push data sources. /// Remember that chunk's len may be >= len! #[zenoh_macros::unstable_doc] - pub fn map(&self, chunk: AllocatedChunk, len: usize) -> ZResult { + pub fn map(&self, chunk: AllocatedChunk, len: usize) -> ZResult { // allocate resources for SHM buffer let (allocated_header, allocated_watchdog, confirmed_watchdog) = Self::alloc_resources()?; @@ -729,7 +729,7 @@ where allocated_watchdog, confirmed_watchdog, ); - Ok(unsafe { ZSliceShmMut::new_unchecked(wrapped) }) + Ok(unsafe { ZShmMut::new_unchecked(wrapped) }) } /// Try to collect free chunks. @@ -806,7 +806,7 @@ where allocated_watchdog, confirmed_watchdog, ); - Ok(unsafe { ZSliceShmMut::new_unchecked(wrapped) }) + Ok(unsafe { ZShmMut::new_unchecked(wrapped) }) } fn alloc_resources() -> ZResult<( @@ -911,6 +911,6 @@ where allocated_watchdog, confirmed_watchdog, ); - Ok(unsafe { ZSliceShmMut::new_unchecked(wrapped) }) + Ok(unsafe { ZShmMut::new_unchecked(wrapped) }) } } diff --git a/commons/zenoh-shm/src/api/provider/types.rs b/commons/zenoh-shm/src/api/provider/types.rs index 662482f567..b7f1ad2de6 100644 --- a/commons/zenoh-shm/src/api/provider/types.rs +++ b/commons/zenoh-shm/src/api/provider/types.rs @@ -16,7 +16,7 @@ use std::fmt::Display; use zenoh_result::{bail, ZResult}; -use crate::api::slice::zsliceshmmut::ZSliceShmMut; +use crate::api::buffer::zshmmut::ZShmMut; use super::chunk::AllocatedChunk; @@ -170,4 +170,4 @@ pub type ChunkAllocResult = Result; /// SHM buffer allocation result #[zenoh_macros::unstable_doc] -pub type BufAllocResult = Result; +pub type BufAllocResult = Result; diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs index 60015829aa..2773348fd0 100644 --- a/examples/examples/z_get_shm.rs +++ b/examples/examples/z_get_shm.rs @@ -94,7 +94,7 @@ async fn main() { match reply.result() { Ok(sample) => { print!(">> Received ('{}': ", sample.key_expr().as_str()); - match sample.payload().deserialize::<&zsliceshm>() { + match sample.payload().deserialize::<&zshm>() { Ok(payload) => println!("'{}')", String::from_utf8_lossy(payload),), Err(e) => println!("'Not a SharedMemoryBuf: {:?}')", e), } diff --git a/examples/examples/z_payload_shm.rs b/examples/examples/z_payload_shm.rs index 4bf45381de..d9ab4e1f82 100644 --- a/examples/examples/z_payload_shm.rs +++ b/examples/examples/z_payload_shm.rs @@ -14,8 +14,8 @@ use zenoh::{ bytes::ZBytes, shm::{ - zsliceshm, zsliceshmmut, PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, - ZSliceShm, ZSliceShmMut, POSIX_PROTOCOL_ID, + zshm, zshmmut, PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, ZShm, + ZShmMut, POSIX_PROTOCOL_ID, }, }; @@ -35,59 +35,59 @@ fn main() { // Prepare a layout for allocations let layout = provider.alloc_layout().size(1024).res().unwrap(); - // allocate an SHM buffer (ZSliceShmMut) + // allocate an SHM buffer (ZShmMut) let mut owned_shm_buf_mut = layout.alloc().res().unwrap(); // mutable and immutable API let _data: &[u8] = &owned_shm_buf_mut; let _data_mut: &mut [u8] = &mut owned_shm_buf_mut; - // convert into immutable owned buffer (ZSliceShmMut -> ZSlceShm) - let owned_shm_buf: ZSliceShm = owned_shm_buf_mut.into(); 
+ // convert into immutable owned buffer (ZShmMut -> ZSlceShm) + let owned_shm_buf: ZShm = owned_shm_buf_mut.into(); // immutable API let _data: &[u8] = &owned_shm_buf; - // convert again into mutable owned buffer (ZSliceShm -> ZSlceShmMut) - let mut owned_shm_buf_mut: ZSliceShmMut = owned_shm_buf.try_into().unwrap(); + // convert again into mutable owned buffer (ZShm -> ZSlceShmMut) + let mut owned_shm_buf_mut: ZShmMut = owned_shm_buf.try_into().unwrap(); // mutable and immutable API let _data: &[u8] = &owned_shm_buf_mut; let _data_mut: &mut [u8] = &mut owned_shm_buf_mut; - // build a ZBytes from an SHM buffer (ZSliceShmMut -> ZBytes) + // build a ZBytes from an SHM buffer (ZShmMut -> ZBytes) let mut payload: ZBytes = owned_shm_buf_mut.into(); // branch to illustrate immutable access to SHM data { - // deserialize ZBytes as an immutably borrowed zsliceshm (ZBytes -> &zsliceshm) - let borrowed_shm_buf: &zsliceshm = payload.deserialize().unwrap(); + // deserialize ZBytes as an immutably borrowed zshm (ZBytes -> &zshm) + let borrowed_shm_buf: &zshm = payload.deserialize().unwrap(); // immutable API let _data: &[u8] = borrowed_shm_buf; - // construct owned buffer from borrowed type (&zsliceshm -> ZSliceShm) + // construct owned buffer from borrowed type (&zshm -> ZShm) let owned = borrowed_shm_buf.to_owned(); // immutable API let _data: &[u8] = &owned; - // try to construct mutable ZSliceShmMut (ZSliceShm -> ZSliceShmMut) - let owned_mut: Result = owned.try_into(); - // the attempt fails because ZSliceShm has two existing references ('owned' and inside 'payload') + // try to construct mutable ZShmMut (ZShm -> ZShmMut) + let owned_mut: Result = owned.try_into(); + // the attempt fails because ZShm has two existing references ('owned' and inside 'payload') assert!(owned_mut.is_err()) } // branch to illustrate mutable access to SHM data { - // deserialize ZBytes as mutably borrowed zsliceshm (ZBytes -> &mut zsliceshm) - let borrowed_shm_buf: &mut zsliceshm = payload.deserialize_mut().unwrap(); + // deserialize ZBytes as mutably borrowed zshm (ZBytes -> &mut zshm) + let borrowed_shm_buf: &mut zshm = payload.deserialize_mut().unwrap(); // immutable API let _data: &[u8] = borrowed_shm_buf; - // convert zsliceshm to zsliceshmmut (&mut zsliceshm -> &mut zsliceshmmut) - let borrowed_shm_buf_mut: &mut zsliceshmmut = borrowed_shm_buf.try_into().unwrap(); + // convert zshm to zshmmut (&mut zshm -> &mut zshmmut) + let borrowed_shm_buf_mut: &mut zshmmut = borrowed_shm_buf.try_into().unwrap(); // mutable and immutable API let _data: &[u8] = borrowed_shm_buf_mut; diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs index 7a7bd61580..372967f6e8 100644 --- a/examples/examples/z_ping_shm.rs +++ b/examples/examples/z_ping_shm.rs @@ -80,7 +80,7 @@ fn main() { .res() .unwrap(); - // convert ZSliceShmMut into ZSlice as ZSliceShmMut does not support Clone + // convert ZShmMut into ZSlice as ZShmMut does not support Clone let buf: ZSlice = buf.into(); // -- warmup -- diff --git a/examples/examples/z_queryable_shm.rs b/examples/examples/z_queryable_shm.rs index 62fa7571d5..49939dcb0a 100644 --- a/examples/examples/z_queryable_shm.rs +++ b/examples/examples/z_queryable_shm.rs @@ -76,7 +76,7 @@ async fn main() { query.key_expr().as_str(), ); if let Some(payload) = query.payload() { - match payload.deserialize::<&zsliceshm>() { + match payload.deserialize::<&zshm>() { Ok(payload) => print!(": '{}'", String::from_utf8_lossy(payload)), Err(e) => print!(": 'Not a SharedMemoryBuf: {:?}'", e), } diff 
--git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index a43b5c6cd0..a7e96c2b75 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -41,7 +41,7 @@ async fn main() { sample.kind(), sample.key_expr().as_str(), ); - match sample.payload().deserialize::<&zsliceshm>() { + match sample.payload().deserialize::<&zshm>() { Ok(payload) => print!("'{}'", String::from_utf8_lossy(payload)), Err(e) => print!("'Not a SharedMemoryBuf: {:?}'", e), } @@ -52,12 +52,12 @@ async fn main() { // // holding a reference to the SHM buffer, then it will be able to get a mutable reference to it. // // With the mutable reference at hand, it's possible to mutate in place the SHM buffer content. // - // use zenoh::shm::zsliceshmmut; + // use zenoh::shm::zshmmut; // while let Ok(mut sample) = subscriber.recv_async().await { // let kind = sample.kind(); // let key_expr = sample.key_expr().to_string(); - // match sample.payload_mut().deserialize_mut::<&mut zsliceshmmut>() { + // match sample.payload_mut().deserialize_mut::<&mut zshmmut>() { // Ok(payload) => println!( // ">> [Subscriber] Received {} ('{}': '{:02x?}')", // kind, key_expr, payload diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index c36136ef81..ce88b2bdbe 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -30,9 +30,9 @@ use zenoh_protocol::{core::Properties, zenoh::ext::AttachmentType}; use zenoh_result::{ZError, ZResult}; #[cfg(all(feature = "shared-memory", feature = "unstable"))] use zenoh_shm::{ - api::slice::{ - zsliceshm::{zsliceshm, ZSliceShm}, - zsliceshmmut::{zsliceshmmut, ZSliceShmMut}, + api::buffer::{ + zshm::{zshm, ZShm}, + zshmmut::{zshmmut, ZShmMut}, }, SharedMemoryBuf, }; @@ -1524,47 +1524,47 @@ impl TryFrom<&mut ZBytes> for serde_pickle::Value { // Shared memory conversion #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl Serialize for ZSerde { +impl Serialize for ZSerde { type Output = ZBytes; - fn serialize(self, t: ZSliceShm) -> Self::Output { + fn serialize(self, t: ZShm) -> Self::Output { let slice: ZSlice = t.into(); ZBytes::new(slice) } } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl From for ZBytes { - fn from(t: ZSliceShm) -> Self { +impl From for ZBytes { + fn from(t: ZShm) -> Self { ZSerde.serialize(t) } } // Shared memory conversion #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl Serialize for ZSerde { +impl Serialize for ZSerde { type Output = ZBytes; - fn serialize(self, t: ZSliceShmMut) -> Self::Output { + fn serialize(self, t: ZShmMut) -> Self::Output { let slice: ZSlice = t.into(); ZBytes::new(slice) } } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl From for ZBytes { - fn from(t: ZSliceShmMut) -> Self { +impl From for ZBytes { + fn from(t: ZShmMut) -> Self { ZSerde.serialize(t) } } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> Deserialize<'a, &'a zsliceshm> for ZSerde { +impl<'a> Deserialize<'a, &'a zshm> for ZSerde { type Input = &'a ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: Self::Input) -> Result<&'a zsliceshm, Self::Error> { - // A ZSliceShm is expected to have only one slice + fn deserialize(self, v: Self::Input) -> Result<&'a zshm, Self::Error> { + // A ZShm is expected to have only one slice let mut zslices = v.0.zslices(); if let Some(zs) = zslices.next() { if let Some(shmb) = zs.downcast_ref::() { @@ -1576,7 +1576,7 @@ impl<'a> Deserialize<'a, &'a zsliceshm> for ZSerde { } #[cfg(all(feature = "shared-memory", 
feature = "unstable"))] -impl<'a> TryFrom<&'a ZBytes> for &'a zsliceshm { +impl<'a> TryFrom<&'a ZBytes> for &'a zshm { type Error = ZDeserializeError; fn try_from(value: &'a ZBytes) -> Result { @@ -1585,7 +1585,7 @@ impl<'a> TryFrom<&'a ZBytes> for &'a zsliceshm { } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zsliceshm { +impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zshm { type Error = ZDeserializeError; fn try_from(value: &'a mut ZBytes) -> Result { @@ -1594,11 +1594,11 @@ impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zsliceshm { } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> Deserialize<'a, &'a mut zsliceshm> for ZSerde { +impl<'a> Deserialize<'a, &'a mut zshm> for ZSerde { type Input = &'a mut ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: Self::Input) -> Result<&'a mut zsliceshm, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result<&'a mut zshm, Self::Error> { // A ZSliceShmBorrowMut is expected to have only one slice let mut zslices = v.0.zslices_mut(); if let Some(zs) = zslices.next() { @@ -1611,11 +1611,11 @@ impl<'a> Deserialize<'a, &'a mut zsliceshm> for ZSerde { } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> Deserialize<'a, &'a mut zsliceshmmut> for ZSerde { +impl<'a> Deserialize<'a, &'a mut zshmmut> for ZSerde { type Input = &'a mut ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: Self::Input) -> Result<&'a mut zsliceshmmut, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result<&'a mut zshmmut, Self::Error> { // A ZSliceShmBorrowMut is expected to have only one slice let mut zslices = v.0.zslices_mut(); if let Some(zs) = zslices.next() { @@ -1628,7 +1628,7 @@ impl<'a> Deserialize<'a, &'a mut zsliceshmmut> for ZSerde { } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zsliceshmmut { +impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zshmmut { type Error = ZDeserializeError; fn try_from(value: &'a mut ZBytes) -> Result { @@ -1838,7 +1838,7 @@ mod tests { protocol_id::POSIX_PROTOCOL_ID, }, provider::shared_memory_provider::SharedMemoryProviderBuilder, - slice::zsliceshm::{zsliceshm, ZSliceShm}, + slice::zshm::{zshm, ZShm}, }; const NUM: usize = 1_000; @@ -1964,9 +1964,9 @@ mod tests { let mutable_shm_buf = layout.alloc().res().unwrap(); // convert to immutable SHM buffer - let immutable_shm_buf: ZSliceShm = mutable_shm_buf.into(); + let immutable_shm_buf: ZShm = mutable_shm_buf.into(); - serialize_deserialize!(&zsliceshm, immutable_shm_buf); + serialize_deserialize!(&zshm, immutable_shm_buf); } // Properties diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs index 6c08303612..791bdbd3ea 100644 --- a/zenoh/src/api/encoding.rs +++ b/zenoh/src/api/encoding.rs @@ -17,7 +17,7 @@ use std::{borrow::Cow, convert::Infallible, fmt, str::FromStr}; use zenoh_buffers::{ZBuf, ZSlice}; use zenoh_protocol::core::EncodingId; #[cfg(feature = "shared-memory")] -use zenoh_shm::api::slice::{zsliceshm::ZSliceShm, zsliceshmmut::ZSliceShmMut}; +use zenoh_shm::api::buffer::{zshm::ZShm, zshmmut::ZShmMut}; /// Default encoding values used by Zenoh. 
/// @@ -835,10 +835,10 @@ impl EncodingMapping for serde_pickle::Value { // - Zenoh SHM #[cfg(feature = "shared-memory")] -impl EncodingMapping for ZSliceShm { +impl EncodingMapping for ZShm { const ENCODING: Encoding = Encoding::ZENOH_BYTES; } #[cfg(feature = "shared-memory")] -impl EncodingMapping for ZSliceShmMut { +impl EncodingMapping for ZShmMut { const ENCODING: Encoding = Encoding::ZENOH_BYTES; } diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 92901c54b3..6e6f7bae64 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -417,6 +417,10 @@ pub mod internal { #[cfg(all(feature = "unstable", feature = "shared-memory"))] pub mod shm { + pub use zenoh_shm::api::buffer::{ + zshm::{zshm, ZShm}, + zshmmut::{zshmmut, ZShmMut}, + }; pub use zenoh_shm::api::client::{ shared_memory_client::SharedMemoryClient, shared_memory_segment::SharedMemorySegment, }; @@ -441,8 +445,4 @@ pub mod shm { pub use zenoh_shm::api::provider::types::{ AllocAlignment, BufAllocResult, ChunkAllocResult, MemoryLayout, ZAllocError, }; - pub use zenoh_shm::api::slice::{ - zsliceshm::{zsliceshm, ZSliceShm}, - zsliceshmmut::{zsliceshmmut, ZSliceShmMut}, - }; } diff --git a/zenoh/tests/bytes.rs b/zenoh/tests/bytes.rs index 41e6d14c6e..f8eb11bf63 100644 --- a/zenoh/tests/bytes.rs +++ b/zenoh/tests/bytes.rs @@ -32,38 +32,38 @@ fn shm_bytes_single_buf() { // Prepare a layout for allocations let layout = provider.alloc_layout().size(1024).res().unwrap(); - // allocate an SHM buffer (ZSliceShmMut) + // allocate an SHM buffer (ZShmMut) let owned_shm_buf_mut = layout.alloc().res().unwrap(); - // convert into immutable owned buffer (ZSliceShmMut -> ZSlceShm) - let owned_shm_buf: ZSliceShm = owned_shm_buf_mut.into(); + // convert into immutable owned buffer (ZShmMut -> ZSlceShm) + let owned_shm_buf: ZShm = owned_shm_buf_mut.into(); - // convert again into mutable owned buffer (ZSliceShm -> ZSlceShmMut) - let owned_shm_buf_mut: ZSliceShmMut = owned_shm_buf.try_into().unwrap(); + // convert again into mutable owned buffer (ZShm -> ZSlceShmMut) + let owned_shm_buf_mut: ZShmMut = owned_shm_buf.try_into().unwrap(); - // build a ZBytes from an SHM buffer (ZSliceShmMut -> ZBytes) + // build a ZBytes from an SHM buffer (ZShmMut -> ZBytes) let mut payload: ZBytes = owned_shm_buf_mut.into(); // branch to illustrate immutable access to SHM data { - // deserialize ZBytes as an immutably borrowed zsliceshm (ZBytes -> &zsliceshm) - let borrowed_shm_buf: &zsliceshm = payload.deserialize().unwrap(); + // deserialize ZBytes as an immutably borrowed zshm (ZBytes -> &zshm) + let borrowed_shm_buf: &zshm = payload.deserialize().unwrap(); - // construct owned buffer from borrowed type (&zsliceshm -> ZSliceShm) + // construct owned buffer from borrowed type (&zshm -> ZShm) let owned = borrowed_shm_buf.to_owned(); - // try to construct mutable ZSliceShmMut (ZSliceShm -> ZSliceShmMut) - let owned_mut: Result = owned.try_into(); - // the attempt fails because ZSliceShm has two existing references ('owned' and inside 'payload') + // try to construct mutable ZShmMut (ZShm -> ZShmMut) + let owned_mut: Result = owned.try_into(); + // the attempt fails because ZShm has two existing references ('owned' and inside 'payload') assert!(owned_mut.is_err()) } // branch to illustrate mutable access to SHM data { - // deserialize ZBytes as mutably borrowed zsliceshm (ZBytes -> &mut zsliceshm) - let borrowed_shm_buf: &mut zsliceshm = payload.deserialize_mut().unwrap(); + // deserialize ZBytes as mutably borrowed zshm (ZBytes -> &mut zshm) + let borrowed_shm_buf: 
&mut zshm = payload.deserialize_mut().unwrap(); - // convert zsliceshm to zsliceshmmut (&mut zsliceshm -> &mut zsliceshmmut) - let _borrowed_shm_buf_mut: &mut zsliceshmmut = borrowed_shm_buf.try_into().unwrap(); + // convert zshm to zshmmut (&mut zshm -> &mut zshmmut) + let _borrowed_shm_buf_mut: &mut zshmmut = borrowed_shm_buf.try_into().unwrap(); } } From ccb960dc7814206e68692898cadeb49189eac133 Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Tue, 30 Apr 2024 18:40:12 +0300 Subject: [PATCH 314/357] fix ci --- zenoh/src/api/bytes.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index ce88b2bdbe..874f37ba8c 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -1838,7 +1838,7 @@ mod tests { protocol_id::POSIX_PROTOCOL_ID, }, provider::shared_memory_provider::SharedMemoryProviderBuilder, - slice::zshm::{zshm, ZShm}, + buffer::zshm::{zshm, ZShm}, }; const NUM: usize = 1_000; From bd5a0da5fa1ad2536d06a72740907b79978ee8cc Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Tue, 30 Apr 2024 18:41:55 +0300 Subject: [PATCH 315/357] [skip ci] z_payload_shm -> z_bytes_shm --- examples/Cargo.toml | 4 ++-- examples/examples/{z_payload_shm.rs => z_bytes_shm.rs} | 0 2 files changed, 2 insertions(+), 2 deletions(-) rename examples/examples/{z_payload_shm.rs => z_bytes_shm.rs} (100%) diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 602c3833db..263653028a 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -169,6 +169,6 @@ path = "examples/z_alloc_shm.rs" required-features = ["unstable", "shared-memory"] [[example]] -name = "z_payload_shm" -path = "examples/z_payload_shm.rs" +name = "z_bytes_shm" +path = "examples/z_bytes_shm.rs" required-features = ["unstable", "shared-memory"] diff --git a/examples/examples/z_payload_shm.rs b/examples/examples/z_bytes_shm.rs similarity index 100% rename from examples/examples/z_payload_shm.rs rename to examples/examples/z_bytes_shm.rs From 6ea1cc5b2a1d1008288417858eccc8b9dd38d424 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Mon, 29 Apr 2024 16:57:35 +0200 Subject: [PATCH 316/357] refactor: use unstable rustfmt config to format imports --- .github/workflows/ci.yml | 2 +- .github/workflows/pre-release.yml | 2 +- commons/zenoh-buffers/src/bbuf.rs | 11 +- commons/zenoh-buffers/src/lib.rs | 6 +- commons/zenoh-buffers/src/slice.rs | 13 +- commons/zenoh-buffers/src/vec.rs | 8 +- commons/zenoh-buffers/src/zbuf.rs | 15 +- commons/zenoh-buffers/src/zslice.rs | 9 +- commons/zenoh-buffers/tests/readwrite.rs | 2 +- commons/zenoh-codec/src/common/extension.rs | 4 +- commons/zenoh-codec/src/core/encoding.rs | 3 +- commons/zenoh-codec/src/core/locator.rs | 4 +- commons/zenoh-codec/src/core/mod.rs | 4 +- commons/zenoh-codec/src/core/shm.rs | 3 +- commons/zenoh-codec/src/core/timestamp.rs | 4 +- commons/zenoh-codec/src/core/wire_expr.rs | 4 +- commons/zenoh-codec/src/core/zbuf.rs | 6 +- commons/zenoh-codec/src/core/zenohid.rs | 4 +- commons/zenoh-codec/src/core/zint.rs | 3 +- commons/zenoh-codec/src/core/zslice.rs | 3 +- commons/zenoh-codec/src/network/declare.rs | 4 +- commons/zenoh-codec/src/network/interest.rs | 3 +- commons/zenoh-codec/src/network/mod.rs | 7 +- commons/zenoh-codec/src/network/oam.rs | 3 +- commons/zenoh-codec/src/network/push.rs | 3 +- commons/zenoh-codec/src/network/request.rs | 7 +- commons/zenoh-codec/src/network/response.rs | 7 +- commons/zenoh-codec/src/scouting/hello.rs | 4 +- commons/zenoh-codec/src/scouting/mod.rs | 3 +- 
commons/zenoh-codec/src/scouting/scout.rs | 4 +- commons/zenoh-codec/src/transport/batch.rs | 22 +- commons/zenoh-codec/src/transport/close.rs | 3 +- commons/zenoh-codec/src/transport/fragment.rs | 3 +- commons/zenoh-codec/src/transport/frame.rs | 4 +- commons/zenoh-codec/src/transport/init.rs | 7 +- commons/zenoh-codec/src/transport/join.rs | 4 +- .../zenoh-codec/src/transport/keepalive.rs | 3 +- commons/zenoh-codec/src/transport/mod.rs | 3 +- commons/zenoh-codec/src/transport/oam.rs | 3 +- commons/zenoh-codec/src/transport/open.rs | 4 +- commons/zenoh-codec/src/zenoh/del.rs | 4 +- commons/zenoh-codec/src/zenoh/err.rs | 4 +- commons/zenoh-codec/src/zenoh/mod.rs | 11 +- commons/zenoh-codec/src/zenoh/put.rs | 12 +- commons/zenoh-codec/src/zenoh/query.rs | 5 +- commons/zenoh-codec/src/zenoh/reply.rs | 4 +- commons/zenoh-codec/tests/codec.rs | 10 +- .../zenoh-collections/src/single_or_vec.rs | 5 +- commons/zenoh-config/src/connection_retry.rs | 10 +- commons/zenoh-config/src/lib.rs | 9 +- commons/zenoh-config/src/mode_dependent.rs | 4 +- commons/zenoh-core/src/lib.rs | 3 +- commons/zenoh-crypto/src/cipher.rs | 12 +- commons/zenoh-keyexpr/benches/keyexpr_tree.rs | 13 +- .../zenoh-keyexpr/src/key_expr/borrowed.rs | 4 +- commons/zenoh-keyexpr/src/key_expr/canon.rs | 5 +- .../src/key_expr/format/parsing.rs | 3 +- .../src/key_expr/intersect/classical.rs | 3 +- .../src/key_expr/intersect/mod.rs | 3 +- commons/zenoh-keyexpr/src/key_expr/owned.rs | 3 +- commons/zenoh-keyexpr/src/key_expr/tests.rs | 3 +- .../src/keyexpr_tree/arc_tree.rs | 10 +- .../src/keyexpr_tree/box_tree.rs | 14 +- .../src/keyexpr_tree/impls/hashmap_impl.rs | 11 +- .../src/keyexpr_tree/impls/keyed_set_impl.rs | 3 +- .../src/keyexpr_tree/impls/mod.rs | 3 +- .../src/keyexpr_tree/impls/vec_set_impl.rs | 1 + .../src/keyexpr_tree/iters/includer.rs | 3 +- .../src/keyexpr_tree/iters/inclusion.rs | 4 +- .../src/keyexpr_tree/iters/intersection.rs | 4 +- .../src/keyexpr_tree/iters/tree_iter.rs | 3 +- .../zenoh-keyexpr/src/keyexpr_tree/test.rs | 19 +- .../src/keyexpr_tree/traits/default_impls.rs | 1 + .../src/keyexpr_tree/traits/mod.rs | 3 +- .../zenoh-protocol/src/common/extension.rs | 1 + commons/zenoh-protocol/src/core/cowstr.rs | 6 +- commons/zenoh-protocol/src/core/encoding.rs | 1 + commons/zenoh-protocol/src/core/endpoint.rs | 4 +- commons/zenoh-protocol/src/core/locator.rs | 4 +- commons/zenoh-protocol/src/core/mod.rs | 2 +- commons/zenoh-protocol/src/core/properties.rs | 3 +- commons/zenoh-protocol/src/core/resolution.rs | 4 +- commons/zenoh-protocol/src/core/whatami.rs | 3 +- commons/zenoh-protocol/src/core/wire_expr.rs | 1 + commons/zenoh-protocol/src/network/declare.rs | 20 +- .../zenoh-protocol/src/network/interest.rs | 3 +- commons/zenoh-protocol/src/network/mod.rs | 3 +- commons/zenoh-protocol/src/network/request.rs | 6 +- commons/zenoh-protocol/src/scouting/hello.rs | 3 +- .../zenoh-protocol/src/transport/fragment.rs | 3 +- commons/zenoh-protocol/src/transport/frame.rs | 3 +- commons/zenoh-protocol/src/transport/init.rs | 9 +- commons/zenoh-protocol/src/transport/join.rs | 9 +- commons/zenoh-protocol/src/transport/open.rs | 17 +- commons/zenoh-protocol/src/zenoh/del.rs | 7 +- commons/zenoh-protocol/src/zenoh/err.rs | 7 +- commons/zenoh-protocol/src/zenoh/mod.rs | 3 +- commons/zenoh-protocol/src/zenoh/put.rs | 7 +- commons/zenoh-protocol/src/zenoh/query.rs | 6 +- commons/zenoh-protocol/src/zenoh/reply.rs | 3 +- commons/zenoh-result/src/lib.rs | 3 +- commons/zenoh-runtime/src/lib.rs | 5 +- 
.../src/api/client/shared_memory_client.rs | 7 +- .../src/api/client/shared_memory_segment.rs | 4 +- .../zenoh-shm/src/api/client_storage/mod.rs | 21 +- .../posix/posix_shared_memory_client.rs | 3 +- .../posix_shared_memory_provider_backend.rs | 3 +- .../posix/posix_shared_memory_segment.rs | 11 +- .../api/provider/shared_memory_provider.rs | 11 +- commons/zenoh-shm/src/api/provider/types.rs | 3 +- commons/zenoh-shm/src/api/slice/zsliceshm.rs | 3 +- .../zenoh-shm/src/api/slice/zsliceshmmut.rs | 3 +- commons/zenoh-shm/src/header/segment.rs | 3 +- commons/zenoh-shm/src/lib.rs | 5 +- commons/zenoh-shm/src/watchdog/segment.rs | 3 +- commons/zenoh-sync/src/condition.rs | 3 +- commons/zenoh-sync/src/fifo_queue.rs | 3 +- commons/zenoh-sync/src/lib.rs | 9 +- commons/zenoh-sync/src/lifo_queue.rs | 1 + commons/zenoh-sync/src/mvar.rs | 8 +- commons/zenoh-sync/src/object_pool.rs | 4 +- commons/zenoh-sync/src/signal.rs | 10 +- commons/zenoh-task/src/lib.rs | 7 +- commons/zenoh-util/src/std_only/ffi/win.rs | 8 +- commons/zenoh-util/src/std_only/lib_loader.rs | 11 +- commons/zenoh-util/src/std_only/net/mod.rs | 16 +- commons/zenoh-util/src/std_only/time_range.rs | 3 +- commons/zenoh-util/src/std_only/timer.rs | 33 +-- examples/examples/z_get.rs | 3 +- examples/examples/z_get_liveliness.rs | 3 +- examples/examples/z_ping.rs | 3 +- examples/examples/z_ping_shm.rs | 3 +- examples/examples/z_pub.rs | 3 +- examples/examples/z_pub_thr.rs | 3 +- examples/examples/z_pull.rs | 3 +- examples/examples/z_storage.rs | 3 +- examples/examples/z_sub_shm.rs | 3 +- examples/examples/z_sub_thr.rs | 3 +- io/zenoh-link-commons/src/lib.rs | 6 +- io/zenoh-link-commons/src/listener.rs | 10 +- io/zenoh-link-commons/src/multicast.rs | 3 +- io/zenoh-link-commons/src/tls.rs | 1 + io/zenoh-link-commons/src/unicast.rs | 3 +- io/zenoh-link/src/lib.rs | 65 +++--- io/zenoh-links/zenoh-link-quic/src/lib.rs | 1 - io/zenoh-links/zenoh-link-quic/src/unicast.rs | 27 ++- io/zenoh-links/zenoh-link-quic/src/utils.rs | 35 +-- io/zenoh-links/zenoh-link-quic/src/verify.rs | 4 +- io/zenoh-links/zenoh-link-serial/src/lib.rs | 9 +- .../zenoh-link-serial/src/unicast.rs | 35 +-- io/zenoh-links/zenoh-link-tcp/src/lib.rs | 9 +- io/zenoh-links/zenoh-link-tcp/src/unicast.rs | 20 +- io/zenoh-links/zenoh-link-tls/src/unicast.rs | 30 +-- io/zenoh-links/zenoh-link-tls/src/utils.rs | 28 +-- io/zenoh-links/zenoh-link-udp/src/lib.rs | 9 +- .../zenoh-link-udp/src/multicast.rs | 21 +- io/zenoh-links/zenoh-link-udp/src/unicast.rs | 29 +-- .../zenoh-link-unixpipe/src/unix/unicast.rs | 44 ++-- .../zenoh-link-unixsock_stream/src/lib.rs | 6 +- .../zenoh-link-unixsock_stream/src/unicast.rs | 35 +-- .../zenoh-link-vsock/src/unicast.rs | 23 +- io/zenoh-links/zenoh-link-ws/src/lib.rs | 9 +- io/zenoh-links/zenoh-link-ws/src/unicast.rs | 39 ++-- io/zenoh-transport/src/common/batch.rs | 4 +- .../src/common/defragmentation.rs | 3 +- io/zenoh-transport/src/common/pipeline.rs | 62 +++--- io/zenoh-transport/src/common/priority.rs | 8 +- io/zenoh-transport/src/common/stats.rs | 3 +- io/zenoh-transport/src/lib.rs | 13 +- io/zenoh-transport/src/manager.rs | 24 ++- .../src/multicast/establishment.rs | 20 +- io/zenoh-transport/src/multicast/link.rs | 28 +-- io/zenoh-transport/src/multicast/manager.rs | 12 +- io/zenoh-transport/src/multicast/mod.rs | 16 +- io/zenoh-transport/src/multicast/rx.rs | 12 +- io/zenoh-transport/src/multicast/transport.rs | 34 +-- io/zenoh-transport/src/multicast/tx.rs | 2 +- io/zenoh-transport/src/shm.rs | 1 + .../src/unicast/establishment/accept.rs | 35 +-- 
.../src/unicast/establishment/cookie.rs | 7 +- .../src/unicast/establishment/ext/auth/mod.rs | 12 +- .../unicast/establishment/ext/auth/pubkey.rs | 9 +- .../unicast/establishment/ext/auth/usrpwd.rs | 13 +- .../unicast/establishment/ext/compression.rs | 6 +- .../unicast/establishment/ext/lowlatency.rs | 6 +- .../unicast/establishment/ext/multilink.rs | 9 +- .../src/unicast/establishment/ext/qos.rs | 6 +- .../src/unicast/establishment/ext/shm.rs | 6 +- .../src/unicast/establishment/mod.rs | 3 +- .../src/unicast/establishment/open.rs | 32 +-- io/zenoh-transport/src/unicast/link.rs | 7 +- .../src/unicast/lowlatency/link.rs | 17 +- .../src/unicast/lowlatency/rx.rs | 3 +- .../src/unicast/lowlatency/transport.rs | 34 +-- .../src/unicast/lowlatency/tx.rs | 2 +- io/zenoh-transport/src/unicast/manager.rs | 34 +-- io/zenoh-transport/src/unicast/mod.rs | 18 +- .../src/unicast/test_helpers.rs | 3 +- .../src/unicast/transport_unicast_inner.rs | 11 +- .../src/unicast/universal/link.rs | 18 +- .../src/unicast/universal/rx.rs | 20 +- .../src/unicast/universal/transport.rs | 30 +-- .../src/unicast/universal/tx.rs | 2 +- io/zenoh-transport/tests/endpoints.rs | 1 + .../tests/multicast_compression.rs | 1 + .../tests/multicast_transport.rs | 1 + .../tests/transport_whitelist.rs | 1 + .../tests/unicast_authenticator.rs | 9 +- .../tests/unicast_compression.rs | 3 +- .../tests/unicast_concurrent.rs | 15 +- .../tests/unicast_defragmentation.rs | 1 + .../tests/unicast_intermittent.rs | 17 +- io/zenoh-transport/tests/unicast_multilink.rs | 1 + io/zenoh-transport/tests/unicast_openclose.rs | 2 +- .../tests/unicast_priorities.rs | 20 +- io/zenoh-transport/tests/unicast_shm.rs | 1 + .../tests/unicast_simultaneous.rs | 15 +- io/zenoh-transport/tests/unicast_time.rs | 1 + io/zenoh-transport/tests/unicast_transport.rs | 5 +- plugins/zenoh-backend-example/src/lib.rs | 3 +- plugins/zenoh-backend-traits/src/config.rs | 9 +- plugins/zenoh-backend-traits/src/lib.rs | 10 +- plugins/zenoh-plugin-example/src/lib.rs | 29 +-- .../zenoh-plugin-rest/examples/z_serve_sse.rs | 13 +- plugins/zenoh-plugin-rest/src/config.rs | 10 +- plugins/zenoh-plugin-rest/src/lib.rs | 34 ++- .../src/backends_mgt.rs | 12 +- .../zenoh-plugin-storage-manager/src/lib.rs | 46 ++-- .../src/memory_backend/mod.rs | 15 +- .../src/replica/align_queryable.rs | 15 +- .../src/replica/aligner.rs | 11 +- .../src/replica/digest.rs | 16 +- .../src/replica/mod.rs | 22 +- .../src/replica/snapshotter.rs | 24 ++- .../src/replica/storage.rs | 56 ++--- .../src/storages_mgt.rs | 3 +- .../tests/operations.rs | 6 +- .../tests/wildcard.rs | 6 +- plugins/zenoh-plugin-trait/src/manager.rs | 2 +- .../src/manager/dynamic_plugin.rs | 3 +- .../src/manager/static_plugin.rs | 4 +- plugins/zenoh-plugin-trait/src/plugin.rs | 6 +- zenoh-ext/examples/examples/z_member.rs | 4 +- zenoh-ext/examples/examples/z_pub_cache.rs | 9 +- zenoh-ext/examples/examples/z_query_sub.rs | 6 +- zenoh-ext/examples/examples/z_view_size.rs | 4 +- zenoh-ext/src/group.rs | 22 +- zenoh-ext/src/lib.rs | 7 +- zenoh-ext/src/publication_cache.rs | 33 +-- zenoh-ext/src/querying_subscriber.rs | 40 ++-- zenoh-ext/src/session_ext.rs | 7 +- zenoh-ext/src/subscriber_ext.rs | 16 +- zenoh/src/api/admin.rs | 19 +- zenoh/src/api/builders/publication.rs | 31 +-- zenoh/src/api/builders/sample.rs | 23 +- zenoh/src/api/bytes.rs | 11 +- zenoh/src/api/encoding.rs | 6 +- zenoh/src/api/handlers/ring.rs | 7 +- zenoh/src/api/info.rs | 4 +- zenoh/src/api/key_expr.rs | 9 +- zenoh/src/api/liveliness.rs | 19 +- zenoh/src/api/loader.rs | 5 +- 
zenoh/src/api/plugins.rs | 5 +- zenoh/src/api/publication.rs | 39 ++-- zenoh/src/api/query.rs | 22 +- zenoh/src/api/queryable.rs | 34 +-- zenoh/src/api/sample.rs | 15 +- zenoh/src/api/scouting.rs | 18 +- zenoh/src/api/selector.rs | 4 +- zenoh/src/api/session.rs | 54 ++--- zenoh/src/api/subscriber.rs | 22 +- zenoh/src/api/time.rs | 1 + zenoh/src/lib.rs | 199 ++++++++---------- zenoh/src/net/codec/linkstate.rs | 12 +- zenoh/src/net/primitives/demux.rs | 13 +- zenoh/src/net/primitives/mux.rs | 14 +- zenoh/src/net/routing/dispatcher/face.rs | 28 ++- zenoh/src/net/routing/dispatcher/pubsub.rs | 25 ++- zenoh/src/net/routing/dispatcher/queries.rs | 22 +- zenoh/src/net/routing/dispatcher/resource.rs | 26 ++- zenoh/src/net/routing/dispatcher/tables.rs | 37 ++-- zenoh/src/net/routing/hat/client/mod.rs | 37 ++-- zenoh/src/net/routing/hat/client/pubsub.rs | 37 ++-- zenoh/src/net/routing/hat/client/queries.rs | 44 ++-- .../src/net/routing/hat/linkstate_peer/mod.rs | 48 ++--- .../net/routing/hat/linkstate_peer/network.rs | 36 ++-- .../net/routing/hat/linkstate_peer/pubsub.rs | 43 ++-- .../net/routing/hat/linkstate_peer/queries.rs | 50 +++-- zenoh/src/net/routing/hat/mod.rs | 18 +- zenoh/src/net/routing/hat/p2p_peer/gossip.rs | 28 ++- zenoh/src/net/routing/hat/p2p_peer/mod.rs | 49 +++-- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 37 ++-- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 44 ++-- zenoh/src/net/routing/hat/router/mod.rs | 56 ++--- zenoh/src/net/routing/hat/router/network.rs | 35 +-- zenoh/src/net/routing/hat/router/pubsub.rs | 43 ++-- zenoh/src/net/routing/hat/router/queries.rs | 50 +++-- .../net/routing/interceptor/access_control.rs | 16 +- .../net/routing/interceptor/authorization.rs | 9 +- .../net/routing/interceptor/downsampling.rs | 16 +- zenoh/src/net/routing/interceptor/mod.rs | 5 +- zenoh/src/net/routing/mod.rs | 10 +- zenoh/src/net/routing/router.rs | 43 ++-- zenoh/src/net/runtime/adminspace.rs | 48 +++-- zenoh/src/net/runtime/mod.rs | 43 ++-- zenoh/src/net/runtime/orchestrator.rs | 16 +- zenoh/src/net/tests/tables.rs | 34 +-- zenoh/src/prelude.rs | 68 +++--- zenoh/tests/acl.rs | 7 +- zenoh/tests/attachments.rs | 3 +- zenoh/tests/events.rs | 4 +- zenoh/tests/handler.rs | 1 + zenoh/tests/interceptors.rs | 4 +- zenoh/tests/liveliness.rs | 4 +- zenoh/tests/qos.rs | 4 +- zenoh/tests/routing.rs | 21 +- zenoh/tests/session.rs | 14 +- zenoh/tests/shm.rs | 14 +- zenoh/tests/unicity.rs | 14 +- zenohd/src/main.rs | 14 +- 320 files changed, 2384 insertions(+), 1882 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 084a3b148f..cb57db3abe 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -47,7 +47,7 @@ jobs: uses: Swatinem/rust-cache@v2 - name: Code format check - run: cargo fmt --check + run: cargo fmt --check -- --config "unstable_features=true,imports_granularity=Crate,group_imports=StdExternalCrate" - name: Clippy run: cargo +stable clippy --all-targets -- --deny warnings diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index 9452e0da86..bb245d4747 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -53,7 +53,7 @@ jobs: run: rustup component add rustfmt clippy - name: Code format check - run: cargo fmt --check + run: cargo fmt --check -- cargo fmt --check -- --config "unstable_features=true,imports_granularity=Crate,group_imports=StdExternalCrate" env: CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse diff --git a/commons/zenoh-buffers/src/bbuf.rs 
b/commons/zenoh-buffers/src/bbuf.rs index 687961aa5e..72491ae704 100644 --- a/commons/zenoh-buffers/src/bbuf.rs +++ b/commons/zenoh-buffers/src/bbuf.rs @@ -11,6 +11,11 @@ // Contributors: // ZettaScale Zenoh Team, // +#[cfg(not(feature = "std"))] +use alloc::boxed::Box; +use alloc::sync::Arc; +use core::{fmt, num::NonZeroUsize, option}; + use crate::{ buffer::{Buffer, SplitBuffer}, reader::HasReader, @@ -18,11 +23,6 @@ use crate::{ writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, ZSlice, }; -use alloc::sync::Arc; -use core::{fmt, num::NonZeroUsize, option}; - -#[cfg(not(feature = "std"))] -use alloc::boxed::Box; #[derive(Clone, PartialEq, Eq)] pub struct BBuf { @@ -199,6 +199,7 @@ impl BBuf { pub fn rand(len: usize) -> Self { #[cfg(not(feature = "std"))] use alloc::vec::Vec; + use rand::Rng; let mut rng = rand::thread_rng(); diff --git a/commons/zenoh-buffers/src/lib.rs b/commons/zenoh-buffers/src/lib.rs index 117fb412b7..da0cdd4030 100644 --- a/commons/zenoh-buffers/src/lib.rs +++ b/commons/zenoh-buffers/src/lib.rs @@ -113,9 +113,10 @@ pub mod buffer { } pub mod writer { - use crate::ZSlice; use core::num::NonZeroUsize; + use crate::ZSlice; + #[derive(Debug, Clone, Copy)] pub struct DidntWrite; @@ -156,9 +157,10 @@ pub mod writer { } pub mod reader { - use crate::ZSlice; use core::num::NonZeroUsize; + use crate::ZSlice; + #[derive(Debug, Clone, Copy)] pub struct DidntRead; diff --git a/commons/zenoh-buffers/src/slice.rs b/commons/zenoh-buffers/src/slice.rs index a652c6930e..f26e37a2aa 100644 --- a/commons/zenoh-buffers/src/slice.rs +++ b/commons/zenoh-buffers/src/slice.rs @@ -11,12 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{ - buffer::{Buffer, SplitBuffer}, - reader::{BacktrackableReader, DidntRead, DidntSiphon, HasReader, Reader, SiphonableReader}, - writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, - ZSlice, -}; use core::{ marker::PhantomData, mem, @@ -25,6 +19,13 @@ use core::{ slice::{self}, }; +use crate::{ + buffer::{Buffer, SplitBuffer}, + reader::{BacktrackableReader, DidntRead, DidntSiphon, HasReader, Reader, SiphonableReader}, + writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, + ZSlice, +}; + // Buffer impl Buffer for &[u8] { #[inline(always)] diff --git a/commons/zenoh-buffers/src/vec.rs b/commons/zenoh-buffers/src/vec.rs index bc2edf87bb..9d63880aea 100644 --- a/commons/zenoh-buffers/src/vec.rs +++ b/commons/zenoh-buffers/src/vec.rs @@ -11,15 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; +use core::{mem, num::NonZeroUsize, option}; + use crate::{ buffer::{Buffer, SplitBuffer}, reader::HasReader, writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, }; -use core::{mem, num::NonZeroUsize, option}; - -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; /// Allocate a vector with a given capacity and sets the length to that capacity. 
#[must_use] diff --git a/commons/zenoh-buffers/src/zbuf.rs b/commons/zenoh-buffers/src/zbuf.rs index 616dbb1b96..50eb54c923 100644 --- a/commons/zenoh-buffers/src/zbuf.rs +++ b/commons/zenoh-buffers/src/zbuf.rs @@ -11,6 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // +use alloc::{sync::Arc, vec::Vec}; +use core::{cmp, iter, mem, num::NonZeroUsize, ops::RangeBounds, ptr}; +#[cfg(feature = "std")] +use std::io; + +use zenoh_collections::SingleOrVec; + #[cfg(feature = "shared-memory")] use crate::ZSliceKind; use crate::{ @@ -22,11 +29,6 @@ use crate::{ writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, ZSlice, ZSliceBuffer, }; -use alloc::{sync::Arc, vec::Vec}; -use core::{cmp, iter, mem, num::NonZeroUsize, ops::RangeBounds, ptr}; -#[cfg(feature = "std")] -use std::io; -use zenoh_collections::SingleOrVec; fn get_mut_unchecked(arc: &mut Arc) -> &mut T { unsafe { &mut (*(Arc::as_ptr(arc) as *mut T)) } @@ -776,9 +778,10 @@ mod tests { #[cfg(feature = "std")] #[test] fn zbuf_seek() { + use std::io::Seek; + use super::{HasReader, ZBuf}; use crate::reader::Reader; - use std::io::Seek; let mut buf = ZBuf::empty(); buf.push_zslice([0u8, 1u8, 2u8, 3u8].into()); diff --git a/commons/zenoh-buffers/src/zslice.rs b/commons/zenoh-buffers/src/zslice.rs index 60dbdab5e1..c169fcd4c0 100644 --- a/commons/zenoh-buffers/src/zslice.rs +++ b/commons/zenoh-buffers/src/zslice.rs @@ -11,10 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{ - buffer::{Buffer, SplitBuffer}, - reader::{BacktrackableReader, DidntRead, HasReader, Reader}, -}; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use core::{ any::Any, @@ -25,6 +21,11 @@ use core::{ option, }; +use crate::{ + buffer::{Buffer, SplitBuffer}, + reader::{BacktrackableReader, DidntRead, HasReader, Reader}, +}; + /*************************************/ /* ZSLICE BUFFER */ /*************************************/ diff --git a/commons/zenoh-buffers/tests/readwrite.rs b/commons/zenoh-buffers/tests/readwrite.rs index ea48218a85..cdfc8fea05 100644 --- a/commons/zenoh-buffers/tests/readwrite.rs +++ b/commons/zenoh-buffers/tests/readwrite.rs @@ -14,8 +14,8 @@ use zenoh_buffers::{ reader::{HasReader, Reader, SiphonableReader}, writer::{BacktrackableWriter, HasWriter, Writer}, + BBuf, ZBuf, ZSlice, }; -use zenoh_buffers::{BBuf, ZBuf, ZSlice}; const BYTES: usize = 18; diff --git a/commons/zenoh-codec/src/common/extension.rs b/commons/zenoh-codec/src/common/extension.rs index 6c22f8ff01..21d716a769 100644 --- a/commons/zenoh-codec/src/common/extension.rs +++ b/commons/zenoh-codec/src/common/extension.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Header}; use alloc::vec::Vec; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -22,6 +22,8 @@ use zenoh_protocol::common::{ iext, imsg::has_flag, ZExtBody, ZExtUnit, ZExtUnknown, ZExtZ64, ZExtZBuf, ZExtZBufHeader, }; +use crate::{RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Header}; + fn read_inner(reader: &mut R, _s: &str, header: u8) -> Result<(ZExtUnknown, bool), DidntRead> where R: Reader, diff --git a/commons/zenoh-codec/src/core/encoding.rs b/commons/zenoh-codec/src/core/encoding.rs index c8033cdd5f..abe33f6ab8 100644 --- a/commons/zenoh-codec/src/core/encoding.rs +++ b/commons/zenoh-codec/src/core/encoding.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; use zenoh_buffers::{ 
reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -21,6 +20,8 @@ use zenoh_protocol::{ core::encoding::{flag, Encoding, EncodingId}, }; +use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; + impl LCodec<&Encoding> for Zenoh080 { fn w_len(self, x: &Encoding) -> usize { let mut len = self.w_len((x.id as u32) << 1); diff --git a/commons/zenoh-codec/src/core/locator.rs b/commons/zenoh-codec/src/core/locator.rs index 0bbd28a189..464b1bbb05 100644 --- a/commons/zenoh-codec/src/core/locator.rs +++ b/commons/zenoh-codec/src/core/locator.rs @@ -11,15 +11,17 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{RCodec, WCodec, Zenoh080, Zenoh080Bounded}; use alloc::{string::String, vec::Vec}; use core::convert::TryFrom; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; use zenoh_protocol::core::Locator; +use crate::{RCodec, WCodec, Zenoh080, Zenoh080Bounded}; + impl WCodec<&Locator, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/core/mod.rs b/commons/zenoh-codec/src/core/mod.rs index c8e19f057f..8230cdd9ac 100644 --- a/commons/zenoh-codec/src/core/mod.rs +++ b/commons/zenoh-codec/src/core/mod.rs @@ -22,13 +22,15 @@ mod zenohid; mod zint; mod zslice; -use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; use alloc::{string::String, vec::Vec}; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; +use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; + // [u8; N] macro_rules! array_impl { ($n:expr) => { diff --git a/commons/zenoh-codec/src/core/shm.rs b/commons/zenoh-codec/src/core/shm.rs index 2548e4ed14..e25496a268 100644 --- a/commons/zenoh-codec/src/core/shm.rs +++ b/commons/zenoh-codec/src/core/shm.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{RCodec, WCodec, Zenoh080}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -21,6 +20,8 @@ use zenoh_shm::{ watchdog::descriptor::Descriptor, SharedMemoryBufInfo, }; +use crate::{RCodec, WCodec, Zenoh080}; + impl WCodec<&Descriptor, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/core/timestamp.rs b/commons/zenoh-codec/src/core/timestamp.rs index 4891643192..025f8f8bf5 100644 --- a/commons/zenoh-codec/src/core/timestamp.rs +++ b/commons/zenoh-codec/src/core/timestamp.rs @@ -11,14 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{LCodec, RCodec, WCodec, Zenoh080}; use core::convert::TryFrom; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; use zenoh_protocol::core::{Timestamp, ZenohId}; +use crate::{LCodec, RCodec, WCodec, Zenoh080}; + impl LCodec<&Timestamp> for Zenoh080 { fn w_len(self, x: &Timestamp) -> usize { self.w_len(x.get_time().as_u64()) + self.w_len(x.get_id().size()) diff --git a/commons/zenoh-codec/src/core/wire_expr.rs b/commons/zenoh-codec/src/core/wire_expr.rs index aa6f77b379..d5b91f75ed 100644 --- a/commons/zenoh-codec/src/core/wire_expr.rs +++ b/commons/zenoh-codec/src/core/wire_expr.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{core::Zenoh080Bounded, RCodec, WCodec, Zenoh080, Zenoh080Condition}; use alloc::string::String; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -22,6 +22,8 @@ use zenoh_protocol::{ network::Mapping, }; +use crate::{core::Zenoh080Bounded, RCodec, WCodec, Zenoh080, Zenoh080Condition}; + impl WCodec<&WireExpr<'_>, &mut W> for Zenoh080 where W: Writer, diff 
--git a/commons/zenoh-codec/src/core/zbuf.rs b/commons/zenoh-codec/src/core/zbuf.rs index 137030e66c..8b8ead6ca0 100644 --- a/commons/zenoh-codec/src/core/zbuf.rs +++ b/commons/zenoh-codec/src/core/zbuf.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; use zenoh_buffers::{ buffer::Buffer, reader::{DidntRead, Reader}, @@ -19,6 +18,8 @@ use zenoh_buffers::{ ZBuf, }; +use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; + // ZBuf bounded macro_rules! zbuf_impl { ($bound:ty) => { @@ -100,9 +101,10 @@ impl LCodec<&ZBuf> for Zenoh080 { // ZBuf sliced #[cfg(feature = "shared-memory")] mod shm { + use zenoh_buffers::{ZSlice, ZSliceKind}; + use super::*; use crate::Zenoh080Sliced; - use zenoh_buffers::{ZSlice, ZSliceKind}; const RAW: u8 = 0; const SHM_PTR: u8 = 1; diff --git a/commons/zenoh-codec/src/core/zenohid.rs b/commons/zenoh-codec/src/core/zenohid.rs index 6c53d4e63f..5098cad534 100644 --- a/commons/zenoh-codec/src/core/zenohid.rs +++ b/commons/zenoh-codec/src/core/zenohid.rs @@ -11,14 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Length}; use core::convert::TryFrom; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; use zenoh_protocol::core::ZenohId; +use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Length}; + impl LCodec<&ZenohId> for Zenoh080 { fn w_len(self, x: &ZenohId) -> usize { x.size() diff --git a/commons/zenoh-codec/src/core/zint.rs b/commons/zenoh-codec/src/core/zint.rs index d5160e2ee6..a29f88f3d5 100644 --- a/commons/zenoh-codec/src/core/zint.rs +++ b/commons/zenoh-codec/src/core/zint.rs @@ -11,12 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; +use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; + const VLE_LEN_MAX: usize = vle_len(u64::MAX); const fn vle_len(x: u64) -> usize { diff --git a/commons/zenoh-codec/src/core/zslice.rs b/commons/zenoh-codec/src/core/zslice.rs index cea0961b51..fe907ed273 100644 --- a/commons/zenoh-codec/src/core/zslice.rs +++ b/commons/zenoh-codec/src/core/zslice.rs @@ -11,13 +11,14 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{RCodec, WCodec, Zenoh080, Zenoh080Bounded}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, ZSlice, }; +use crate::{RCodec, WCodec, Zenoh080, Zenoh080Bounded}; + // ZSlice - Bounded macro_rules! 
zslice_impl { ($bound:ty) => { diff --git a/commons/zenoh-codec/src/network/declare.rs b/commons/zenoh-codec/src/network/declare.rs index ed3d019950..faffb04952 100644 --- a/commons/zenoh-codec/src/network/declare.rs +++ b/commons/zenoh-codec/src/network/declare.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Condition, Zenoh080Header}; use alloc::string::String; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, HasWriter, Writer}, @@ -27,6 +27,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Condition, Zenoh080Header}; + // Declaration impl WCodec<&DeclareBody, &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/src/network/interest.rs b/commons/zenoh-codec/src/network/interest.rs index 852e106f98..2deda7748a 100644 --- a/commons/zenoh-codec/src/network/interest.rs +++ b/commons/zenoh-codec/src/network/interest.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Condition, Zenoh080Header}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -29,6 +28,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Condition, Zenoh080Header}; + // Interest impl WCodec<&Interest, &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/src/network/mod.rs b/commons/zenoh-codec/src/network/mod.rs index 5ebdb17b8e..fe9d254ee8 100644 --- a/commons/zenoh-codec/src/network/mod.rs +++ b/commons/zenoh-codec/src/network/mod.rs @@ -18,9 +18,6 @@ mod push; mod request; mod response; -use crate::{ - LCodec, RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length, Zenoh080Reliability, -}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -31,6 +28,10 @@ use zenoh_protocol::{ network::{ext::EntityGlobalIdType, *}, }; +use crate::{ + LCodec, RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length, Zenoh080Reliability, +}; + // NetworkMessage impl WCodec<&NetworkMessage, &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/src/network/oam.rs b/commons/zenoh-codec/src/network/oam.rs index 9751e9952d..172b3f1058 100644 --- a/commons/zenoh-codec/src/network/oam.rs +++ b/commons/zenoh-codec/src/network/oam.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -25,6 +24,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + impl WCodec<&Oam, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/network/push.rs b/commons/zenoh-codec/src/network/push.rs index b9ec2ba5db..2c2e11a718 100644 --- a/commons/zenoh-codec/src/network/push.rs +++ b/commons/zenoh-codec/src/network/push.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Condition, Zenoh080Header}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -27,6 +26,8 @@ use zenoh_protocol::{ zenoh::PushBody, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Condition, Zenoh080Header}; + impl WCodec<&Push, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/network/request.rs b/commons/zenoh-codec/src/network/request.rs 
index 6173840d7e..21f42709c4 100644 --- a/commons/zenoh-codec/src/network/request.rs +++ b/commons/zenoh-codec/src/network/request.rs @@ -11,9 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{ - common::extension, RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Condition, Zenoh080Header, -}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -29,6 +26,10 @@ use zenoh_protocol::{ zenoh::RequestBody, }; +use crate::{ + common::extension, RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Condition, Zenoh080Header, +}; + // Target impl WCodec<(&ext::TargetType, bool), &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/src/network/response.rs b/commons/zenoh-codec/src/network/response.rs index 5b69e8b109..d94316de8e 100644 --- a/commons/zenoh-codec/src/network/response.rs +++ b/commons/zenoh-codec/src/network/response.rs @@ -11,9 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{ - common::extension, RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Condition, Zenoh080Header, -}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -29,6 +26,10 @@ use zenoh_protocol::{ zenoh::ResponseBody, }; +use crate::{ + common::extension, RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Condition, Zenoh080Header, +}; + // Response impl WCodec<&Response, &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/src/scouting/hello.rs b/commons/zenoh-codec/src/scouting/hello.rs index 430201133e..c3aff83667 100644 --- a/commons/zenoh-codec/src/scouting/hello.rs +++ b/commons/zenoh-codec/src/scouting/hello.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length}; use alloc::{vec, vec::Vec}; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -26,6 +26,8 @@ use zenoh_protocol::{ }, }; +use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length}; + impl WCodec<&Hello, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/scouting/mod.rs b/commons/zenoh-codec/src/scouting/mod.rs index bbedce4282..d1f0b883a1 100644 --- a/commons/zenoh-codec/src/scouting/mod.rs +++ b/commons/zenoh-codec/src/scouting/mod.rs @@ -14,7 +14,6 @@ mod hello; mod scout; -use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -24,6 +23,8 @@ use zenoh_protocol::{ scouting::{id, ScoutingBody, ScoutingMessage}, }; +use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header}; + impl WCodec<&ScoutingMessage, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/scouting/scout.rs b/commons/zenoh-codec/src/scouting/scout.rs index 02d5294047..888ce2954f 100644 --- a/commons/zenoh-codec/src/scouting/scout.rs +++ b/commons/zenoh-codec/src/scouting/scout.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length}; use core::convert::TryFrom; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -26,6 +26,8 @@ use zenoh_protocol::{ }, }; +use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length}; + impl WCodec<&Scout, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/transport/batch.rs b/commons/zenoh-codec/src/transport/batch.rs index 525336d6e8..a08e796358 100644 --- a/commons/zenoh-codec/src/transport/batch.rs +++ 
b/commons/zenoh-codec/src/transport/batch.rs @@ -11,17 +11,23 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{RCodec, WCodec, Zenoh080}; use core::num::NonZeroUsize; -use zenoh_buffers::reader::{BacktrackableReader, DidntRead, Reader, SiphonableReader}; -use zenoh_buffers::writer::{BacktrackableWriter, DidntWrite, Writer}; -use zenoh_buffers::ZBufReader; -use zenoh_protocol::core::Reliability; -use zenoh_protocol::network::NetworkMessage; -use zenoh_protocol::transport::{ - Fragment, FragmentHeader, Frame, FrameHeader, TransportBody, TransportMessage, TransportSn, + +use zenoh_buffers::{ + reader::{BacktrackableReader, DidntRead, Reader, SiphonableReader}, + writer::{BacktrackableWriter, DidntWrite, Writer}, + ZBufReader, +}; +use zenoh_protocol::{ + core::Reliability, + network::NetworkMessage, + transport::{ + Fragment, FragmentHeader, Frame, FrameHeader, TransportBody, TransportMessage, TransportSn, + }, }; +use crate::{RCodec, WCodec, Zenoh080}; + #[derive(Clone, Copy, Debug)] #[repr(u8)] pub enum CurrentFrame { diff --git a/commons/zenoh-codec/src/transport/close.rs b/commons/zenoh-codec/src/transport/close.rs index 9771b9e1e9..62d9e542b7 100644 --- a/commons/zenoh-codec/src/transport/close.rs +++ b/commons/zenoh-codec/src/transport/close.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -24,6 +23,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + impl WCodec<&Close, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/transport/fragment.rs b/commons/zenoh-codec/src/transport/fragment.rs index b01e2c2bae..fc30abce9d 100644 --- a/commons/zenoh-codec/src/transport/fragment.rs +++ b/commons/zenoh-codec/src/transport/fragment.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use zenoh_buffers::{ reader::{BacktrackableReader, DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -25,6 +24,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + // FragmentHeader impl WCodec<&FragmentHeader, &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/src/transport/frame.rs b/commons/zenoh-codec/src/transport/frame.rs index ab82a024c4..6db4e70652 100644 --- a/commons/zenoh-codec/src/transport/frame.rs +++ b/commons/zenoh-codec/src/transport/frame.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Reliability}; use alloc::vec::Vec; + use zenoh_buffers::{ reader::{BacktrackableReader, DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -27,6 +27,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Reliability}; + // FrameHeader impl WCodec<&FrameHeader, &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/src/transport/init.rs b/commons/zenoh-codec/src/transport/init.rs index fec9f07afd..55e129799c 100644 --- a/commons/zenoh-codec/src/transport/init.rs +++ b/commons/zenoh-codec/src/transport/init.rs @@ -11,9 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{ - common::extension, RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Header, Zenoh080Length, -}; use zenoh_buffers::{ reader::{DidntRead, Reader}, 
writer::{DidntWrite, Writer}, @@ -29,6 +26,10 @@ use zenoh_protocol::{ }, }; +use crate::{ + common::extension, RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Header, Zenoh080Length, +}; + // InitSyn impl WCodec<&InitSyn, &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/src/transport/join.rs b/commons/zenoh-codec/src/transport/join.rs index d87ceecc78..896d7f6290 100644 --- a/commons/zenoh-codec/src/transport/join.rs +++ b/commons/zenoh-codec/src/transport/join.rs @@ -11,9 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, LCodec, RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length}; use alloc::boxed::Box; use core::time::Duration; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -28,6 +28,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, LCodec, RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length}; + impl LCodec<&PrioritySn> for Zenoh080 { fn w_len(self, p: &PrioritySn) -> usize { let PrioritySn { diff --git a/commons/zenoh-codec/src/transport/keepalive.rs b/commons/zenoh-codec/src/transport/keepalive.rs index aa6726f50b..44ef4c676a 100644 --- a/commons/zenoh-codec/src/transport/keepalive.rs +++ b/commons/zenoh-codec/src/transport/keepalive.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -24,6 +23,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + impl WCodec<&KeepAlive, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/transport/mod.rs b/commons/zenoh-codec/src/transport/mod.rs index 559b5b5fda..3adae0fb72 100644 --- a/commons/zenoh-codec/src/transport/mod.rs +++ b/commons/zenoh-codec/src/transport/mod.rs @@ -21,7 +21,6 @@ mod keepalive; mod oam; mod open; -use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header}; use zenoh_buffers::{ reader::{BacktrackableReader, DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -32,6 +31,8 @@ use zenoh_protocol::{ transport::*, }; +use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header}; + // TransportMessageLowLatency impl WCodec<&TransportMessageLowLatency, &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/src/transport/oam.rs b/commons/zenoh-codec/src/transport/oam.rs index 6861f638d3..156a0ce1ff 100644 --- a/commons/zenoh-codec/src/transport/oam.rs +++ b/commons/zenoh-codec/src/transport/oam.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -25,6 +24,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + impl WCodec<&Oam, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/transport/open.rs b/commons/zenoh-codec/src/transport/open.rs index d539526715..712fe5ca95 100644 --- a/commons/zenoh-codec/src/transport/open.rs +++ b/commons/zenoh-codec/src/transport/open.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use core::time::Duration; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -27,6 +27,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + 
// OpenSyn impl WCodec<&OpenSyn, &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/src/zenoh/del.rs b/commons/zenoh-codec/src/zenoh/del.rs index 3d0a64f428..07df1affc7 100644 --- a/commons/zenoh-codec/src/zenoh/del.rs +++ b/commons/zenoh-codec/src/zenoh/del.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use alloc::vec::Vec; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -25,6 +25,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + impl WCodec<&Del, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/zenoh/err.rs b/commons/zenoh-codec/src/zenoh/err.rs index 5291645bf0..e19b11f70d 100644 --- a/commons/zenoh-codec/src/zenoh/err.rs +++ b/commons/zenoh-codec/src/zenoh/err.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Header}; use alloc::vec::Vec; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -27,6 +27,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Header}; + impl WCodec<&Err, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/zenoh/mod.rs b/commons/zenoh-codec/src/zenoh/mod.rs index dc38e5ee84..aeb8f53102 100644 --- a/commons/zenoh-codec/src/zenoh/mod.rs +++ b/commons/zenoh-codec/src/zenoh/mod.rs @@ -17,11 +17,6 @@ pub mod put; pub mod query; pub mod reply; -#[cfg(not(feature = "shared-memory"))] -use crate::Zenoh080Bounded; -#[cfg(feature = "shared-memory")] -use crate::Zenoh080Sliced; -use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -35,6 +30,12 @@ use zenoh_protocol::{ zenoh::{ext, id, PushBody, RequestBody, ResponseBody}, }; +#[cfg(not(feature = "shared-memory"))] +use crate::Zenoh080Bounded; +#[cfg(feature = "shared-memory")] +use crate::Zenoh080Sliced; +use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length}; + // Push impl WCodec<&PushBody, &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/src/zenoh/put.rs b/commons/zenoh-codec/src/zenoh/put.rs index 776b47245f..c10a98f6d8 100644 --- a/commons/zenoh-codec/src/zenoh/put.rs +++ b/commons/zenoh-codec/src/zenoh/put.rs @@ -11,12 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(not(feature = "shared-memory"))] -use crate::Zenoh080Bounded; -#[cfg(feature = "shared-memory")] -use crate::Zenoh080Sliced; -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use alloc::vec::Vec; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -31,6 +27,12 @@ use zenoh_protocol::{ }, }; +#[cfg(not(feature = "shared-memory"))] +use crate::Zenoh080Bounded; +#[cfg(feature = "shared-memory")] +use crate::Zenoh080Sliced; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + impl WCodec<&Put, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/zenoh/query.rs b/commons/zenoh-codec/src/zenoh/query.rs index efac7b5671..c9b1cc196e 100644 --- a/commons/zenoh-codec/src/zenoh/query.rs +++ b/commons/zenoh-codec/src/zenoh/query.rs @@ -11,13 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; 
use alloc::{string::String, vec::Vec}; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; - use zenoh_protocol::{ common::{iext, imsg}, zenoh::{ @@ -26,6 +25,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + // Consolidation impl WCodec for Zenoh080 where diff --git a/commons/zenoh-codec/src/zenoh/reply.rs b/commons/zenoh-codec/src/zenoh/reply.rs index 308004a1c2..a8d6a2afdc 100644 --- a/commons/zenoh-codec/src/zenoh/reply.rs +++ b/commons/zenoh-codec/src/zenoh/reply.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use alloc::vec::Vec; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -26,6 +26,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + impl WCodec<&Reply, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/tests/codec.rs b/commons/zenoh-codec/tests/codec.rs index e9b8140f21..c26b681336 100644 --- a/commons/zenoh-codec/tests/codec.rs +++ b/commons/zenoh-codec/tests/codec.rs @@ -11,11 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::convert::TryFrom; + use rand::{ distributions::{Alphanumeric, DistString}, *, }; -use std::convert::TryFrom; use zenoh_buffers::{ reader::{HasReader, Reader}, writer::HasWriter, @@ -360,9 +361,10 @@ fn codec_encoding() { #[cfg(feature = "shared-memory")] #[test] fn codec_shm_info() { - use zenoh_shm::api::provider::chunk::ChunkDescriptor; - use zenoh_shm::header::descriptor::HeaderDescriptor; - use zenoh_shm::{watchdog::descriptor::Descriptor, SharedMemoryBufInfo}; + use zenoh_shm::{ + api::provider::chunk::ChunkDescriptor, header::descriptor::HeaderDescriptor, + watchdog::descriptor::Descriptor, SharedMemoryBufInfo, + }; run!(SharedMemoryBufInfo, { let mut rng = rand::thread_rng(); diff --git a/commons/zenoh-collections/src/single_or_vec.rs b/commons/zenoh-collections/src/single_or_vec.rs index ed82bf49af..7b2391197d 100644 --- a/commons/zenoh-collections/src/single_or_vec.rs +++ b/commons/zenoh-collections/src/single_or_vec.rs @@ -13,6 +13,8 @@ // use alloc::vec; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; use core::{ cmp::PartialEq, fmt, iter, @@ -20,9 +22,6 @@ use core::{ ptr, slice, }; -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; - #[derive(Clone, Eq)] enum SingleOrVecInner { Single(T), diff --git a/commons/zenoh-config/src/connection_retry.rs b/commons/zenoh-config/src/connection_retry.rs index a845fbfe6a..e5f88a05f3 100644 --- a/commons/zenoh-config/src/connection_retry.rs +++ b/commons/zenoh-config/src/connection_retry.rs @@ -12,18 +12,18 @@ // ZettaScale Zenoh Team, // +use serde::{Deserialize, Serialize}; +use zenoh_core::zparse_default; +use zenoh_protocol::core::WhatAmI; + use crate::{ defaults::{ self, DEFAULT_CONNECT_EXIT_ON_FAIL, DEFAULT_CONNECT_TIMEOUT_MS, DEFAULT_LISTEN_EXIT_ON_FAIL, DEFAULT_LISTEN_TIMEOUT_MS, }, + mode_dependent::*, Config, }; -use serde::{Deserialize, Serialize}; -use zenoh_core::zparse_default; -use zenoh_protocol::core::WhatAmI; - -use crate::mode_dependent::*; #[derive(Debug, Deserialize, Serialize, Clone)] pub struct ConnectionRetryModeDependentConf { diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 26f7cfefaa..c55480b2c5 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -16,10 +16,6 @@ pub mod defaults; mod include; 
-use include::recursive_include; -use secrecy::{CloneableSecret, DebugSecret, Secret, SerializableSecret, Zeroize}; -use serde::{Deserialize, Serialize}; -use serde_json::{Map, Value}; #[allow(unused_imports)] use std::convert::TryFrom; // This is a false positive from the rust analyser use std::{ @@ -31,6 +27,11 @@ use std::{ path::Path, sync::{Arc, Mutex, MutexGuard, Weak}, }; + +use include::recursive_include; +use secrecy::{CloneableSecret, DebugSecret, Secret, SerializableSecret, Zeroize}; +use serde::{Deserialize, Serialize}; +use serde_json::{Map, Value}; use validated_struct::ValidatedMapAssociatedTypes; pub use validated_struct::{GetError, ValidatedMap}; use zenoh_core::zlock; diff --git a/commons/zenoh-config/src/mode_dependent.rs b/commons/zenoh-config/src/mode_dependent.rs index 9f6cc2c7e4..074dd823d9 100644 --- a/commons/zenoh-config/src/mode_dependent.rs +++ b/commons/zenoh-config/src/mode_dependent.rs @@ -12,12 +12,12 @@ // ZettaScale Zenoh Team, // +use std::{fmt, marker::PhantomData}; + use serde::{ de::{self, MapAccess, Visitor}, Deserialize, Serialize, }; -use std::fmt; -use std::marker::PhantomData; pub use zenoh_protocol::core::{ whatami, EndPoint, Locator, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, ZenohId, }; diff --git a/commons/zenoh-core/src/lib.rs b/commons/zenoh-core/src/lib.rs index 19cf3751ff..8d6fbfcc0a 100644 --- a/commons/zenoh-core/src/lib.rs +++ b/commons/zenoh-core/src/lib.rs @@ -27,8 +27,7 @@ pub use zenoh_result::{bail, to_zerror, zerror}; pub mod zresult { pub use zenoh_result::*; } -pub use zresult::Error; -pub use zresult::ZResult as Result; +pub use zresult::{Error, ZResult as Result}; /// A resolvable execution, either sync or async pub trait Resolvable { diff --git a/commons/zenoh-crypto/src/cipher.rs b/commons/zenoh-crypto/src/cipher.rs index 3d12712e56..aa78b97b46 100644 --- a/commons/zenoh-crypto/src/cipher.rs +++ b/commons/zenoh-crypto/src/cipher.rs @@ -11,12 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::PseudoRng; -use aes::cipher::{generic_array::GenericArray, BlockDecrypt, BlockEncrypt, KeyInit}; -use aes::Aes128; +use aes::{ + cipher::{generic_array::GenericArray, BlockDecrypt, BlockEncrypt, KeyInit}, + Aes128, +}; use rand::Rng; use zenoh_result::{bail, ZResult}; +use super::PseudoRng; + pub struct BlockCipher { inner: Aes128, } @@ -68,9 +71,10 @@ impl BlockCipher { mod tests { #[test] fn cipher() { - use super::{BlockCipher, PseudoRng}; use rand::{RngCore, SeedableRng}; + use super::{BlockCipher, PseudoRng}; + fn encrypt_decrypt(cipher: &BlockCipher, prng: &mut PseudoRng) { println!("\n[1]"); let t1 = "A".as_bytes().to_vec(); diff --git a/commons/zenoh-keyexpr/benches/keyexpr_tree.rs b/commons/zenoh-keyexpr/benches/keyexpr_tree.rs index 4047e3cf5c..7048521eda 100644 --- a/commons/zenoh-keyexpr/benches/keyexpr_tree.rs +++ b/commons/zenoh-keyexpr/benches/keyexpr_tree.rs @@ -18,12 +18,15 @@ use std::{ }; use rand::SeedableRng; -use zenoh_keyexpr::keyexpr_tree::{ - impls::{HashMapProvider, VecSetProvider}, - traits::*, - KeArcTree, KeBoxTree, +use zenoh_keyexpr::{ + fuzzer::KeyExprFuzzer, + keyexpr_tree::{ + impls::{HashMapProvider, VecSetProvider}, + traits::*, + KeArcTree, KeBoxTree, + }, + OwnedKeyExpr, }; -use zenoh_keyexpr::{fuzzer::KeyExprFuzzer, OwnedKeyExpr}; #[derive(Clone, Copy, Debug, Default)] pub struct Averager { diff --git a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs index 85b4ef79e2..fd87cef55f 100644 --- 
a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs +++ b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // -use super::{canon::Canonizable, OwnedKeyExpr, FORBIDDEN_CHARS}; use alloc::{ borrow::{Borrow, ToOwned}, format, @@ -24,8 +23,11 @@ use core::{ fmt, ops::{Deref, Div}, }; + use zenoh_result::{bail, Error as ZError, ZResult}; +use super::{canon::Canonizable, OwnedKeyExpr, FORBIDDEN_CHARS}; + /// A [`str`] newtype that is statically known to be a valid key expression. /// /// The exact key expression specification can be found [here](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Key%20Expressions.md). Here are the major lines: diff --git a/commons/zenoh-keyexpr/src/key_expr/canon.rs b/commons/zenoh-keyexpr/src/key_expr/canon.rs index 00e79b0c08..cccccdfba3 100644 --- a/commons/zenoh-keyexpr/src/key_expr/canon.rs +++ b/commons/zenoh-keyexpr/src/key_expr/canon.rs @@ -11,12 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // +use alloc::string::String; +use core::{slice, str}; + use crate::key_expr::{ utils::{Split, Writer}, DELIMITER, DOUBLE_WILD, SINGLE_WILD, }; -use alloc::string::String; -use core::{slice, str}; pub trait Canonizable { fn canonize(&mut self); diff --git a/commons/zenoh-keyexpr/src/key_expr/format/parsing.rs b/commons/zenoh-keyexpr/src/key_expr/format/parsing.rs index 52f01c5b6a..a6329cdf73 100644 --- a/commons/zenoh-keyexpr/src/key_expr/format/parsing.rs +++ b/commons/zenoh-keyexpr/src/key_expr/format/parsing.rs @@ -230,8 +230,9 @@ fn do_parse<'a>( #[test] fn parsing() { - use crate::key_expr::OwnedKeyExpr; use core::convert::TryFrom; + + use crate::key_expr::OwnedKeyExpr; for a_spec in ["${a:*}", "a/${a:*}"] { for b_spec in ["b/${b:**}", "${b:**}"] { let specs = [a_spec, b_spec, "c"]; diff --git a/commons/zenoh-keyexpr/src/key_expr/intersect/classical.rs b/commons/zenoh-keyexpr/src/key_expr/intersect/classical.rs index fa346a2d4a..77388a55c9 100644 --- a/commons/zenoh-keyexpr/src/key_expr/intersect/classical.rs +++ b/commons/zenoh-keyexpr/src/key_expr/intersect/classical.rs @@ -119,8 +119,7 @@ pub fn intersect(s1: &[u8], s2: &[u8]) -> bool { it_intersect::(s1, s2) } -use super::restiction::NoSubWilds; -use super::{Intersector, MayHaveVerbatim}; +use super::{restiction::NoSubWilds, Intersector, MayHaveVerbatim}; pub struct ClassicIntersector; impl Intersector, NoSubWilds<&[u8]>> for ClassicIntersector { diff --git a/commons/zenoh-keyexpr/src/key_expr/intersect/mod.rs b/commons/zenoh-keyexpr/src/key_expr/intersect/mod.rs index f5d7735d9e..06b990ee72 100644 --- a/commons/zenoh-keyexpr/src/key_expr/intersect/mod.rs +++ b/commons/zenoh-keyexpr/src/key_expr/intersect/mod.rs @@ -12,9 +12,8 @@ // ZettaScale Zenoh Team, // -use crate::DELIMITER; - use super::keyexpr; +use crate::DELIMITER; mod classical; pub use classical::ClassicIntersector; diff --git a/commons/zenoh-keyexpr/src/key_expr/owned.rs b/commons/zenoh-keyexpr/src/key_expr/owned.rs index 5164e4762c..a53fdec2f0 100644 --- a/commons/zenoh-keyexpr/src/key_expr/owned.rs +++ b/commons/zenoh-keyexpr/src/key_expr/owned.rs @@ -13,7 +13,6 @@ // extern crate alloc; -use super::{canon::Canonizable, keyexpr}; // use crate::core::WireExpr; use alloc::{borrow::ToOwned, boxed::Box, string::String, sync::Arc}; use core::{ @@ -23,6 +22,8 @@ use core::{ str::FromStr, }; +use super::{canon::Canonizable, keyexpr}; + /// A [`Arc`] newtype that is statically known to be a valid key expression. /// /// See [`keyexpr`](super::borrowed::keyexpr). 
diff --git a/commons/zenoh-keyexpr/src/key_expr/tests.rs b/commons/zenoh-keyexpr/src/key_expr/tests.rs index 6d9e64896e..c004666776 100644 --- a/commons/zenoh-keyexpr/src/key_expr/tests.rs +++ b/commons/zenoh-keyexpr/src/key_expr/tests.rs @@ -12,9 +12,10 @@ // ZettaScale Zenoh Team, // -use crate::key_expr::{fuzzer, intersect::*, keyexpr}; use std::{convert::TryInto, fmt::Debug}; +use crate::key_expr::{fuzzer, intersect::*, keyexpr}; + type BoxedIntersectors = Vec Intersector<&'a keyexpr, &'a keyexpr> + Send + Sync>>; lazy_static::lazy_static! { diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs index a0428ac563..e800697bef 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs @@ -20,10 +20,11 @@ use core::fmt::Debug; use token_cell::prelude::*; -use super::box_tree::PruneResult; -use super::support::IterOrOption; -use crate::keyexpr; -use crate::keyexpr_tree::{support::IWildness, *}; +use super::{box_tree::PruneResult, support::IterOrOption}; +use crate::{ + keyexpr, + keyexpr_tree::{support::IWildness, *}, +}; pub struct KeArcTreeInner< Weight, @@ -428,6 +429,7 @@ where pub(crate) mod sealed { use alloc::sync::Arc; use core::ops::{Deref, DerefMut}; + use token_cell::prelude::{TokenCell, TokenTrait}; pub struct Tokenized(pub A, pub(crate) B); diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs index 5aa23e78ac..fcf230731a 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs @@ -17,15 +17,15 @@ use alloc::boxed::Box; use alloc::string::String; use core::ptr::NonNull; -use crate::keyexpr; -use crate::keyexpr_tree::{ - support::{IWildness, NonWild, UnknownWildness}, - *, +use super::{impls::KeyedSetProvider, support::IterOrOption}; +use crate::{ + keyexpr, + keyexpr_tree::{ + support::{IWildness, NonWild, UnknownWildness}, + *, + }, }; -use super::impls::KeyedSetProvider; -use super::support::IterOrOption; - /// A fully owned KeTree. /// /// Note that most of `KeBoxTree`'s methods are declared in the [`IKeyExprTree`] and [`IKeyExprTreeMut`] traits. 
diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/impls/hashmap_impl.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/impls/hashmap_impl.rs index 72f830a912..a5a16e1d82 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/impls/hashmap_impl.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/impls/hashmap_impl.rs @@ -17,17 +17,18 @@ use core::hash::Hasher; // `SipHasher` is deprecated in favour of a symbol that only exists in `std` #[allow(deprecated)] use core::hash::SipHasher as DefaultHasher; -#[cfg(not(feature = "std"))] -use hashbrown::{ - hash_map::{Entry, Iter, IterMut, Values, ValuesMut}, - HashMap, -}; #[cfg(feature = "std")] use std::collections::{ hash_map::{DefaultHasher, Entry, Iter, IterMut, Values, ValuesMut}, HashMap, }; +#[cfg(not(feature = "std"))] +use hashbrown::{ + hash_map::{Entry, Iter, IterMut, Values, ValuesMut}, + HashMap, +}; + use crate::keyexpr_tree::*; #[cfg_attr(not(feature = "std"), allow(deprecated))] diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/impls/keyed_set_impl.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/impls/keyed_set_impl.rs index 4fab65a850..a6b1847697 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/impls/keyed_set_impl.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/impls/keyed_set_impl.rs @@ -20,9 +20,10 @@ use core::hash::SipHasher as DefaultHasher; #[cfg(feature = "std")] use std::collections::hash_map::DefaultHasher; -use crate::keyexpr_tree::*; use keyed_set::{KeyExtractor, KeyedSet}; +use crate::keyexpr_tree::*; + #[cfg_attr(not(feature = "std"), allow(deprecated))] pub struct KeyedSetProvider( core::marker::PhantomData, diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/impls/mod.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/impls/mod.rs index 2645c9d95b..48547429f3 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/impls/mod.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/impls/mod.rs @@ -12,10 +12,11 @@ // ZettaScale Zenoh Team, // -use crate::keyexpr; pub use hashmap_impl::HashMapProvider; pub use keyed_set_impl::KeyedSetProvider; pub use vec_set_impl::VecSetProvider; + +use crate::keyexpr; mod hashmap_impl; mod keyed_set_impl; mod vec_set_impl; diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/impls/vec_set_impl.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/impls/vec_set_impl.rs index 96877ebda6..510755e3c4 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/impls/vec_set_impl.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/impls/vec_set_impl.rs @@ -13,6 +13,7 @@ // use alloc::vec::Vec; + use zenoh_result::unlikely; use crate::keyexpr_tree::*; diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/iters/includer.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/iters/includer.rs index a22d0804b1..bf09714f29 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/iters/includer.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/iters/includer.rs @@ -12,9 +12,10 @@ // ZettaScale Zenoh Team, // -use crate::keyexpr_tree::*; use alloc::vec::Vec; +use crate::keyexpr_tree::*; + struct StackFrame<'a, Children: IChildrenProvider, Node: UIKeyExprTreeNode, Weight> where Children::Assoc: IChildren + 'a, diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/iters/inclusion.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/iters/inclusion.rs index 0ed2c96645..87e5af90a9 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/iters/inclusion.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/iters/inclusion.rs @@ -12,10 +12,12 @@ // ZettaScale Zenoh Team, // -use crate::keyexpr_tree::*; use alloc::vec::Vec; + use zenoh_result::unlikely; +use crate::keyexpr_tree::*; + struct 
StackFrame<'a, Children: IChildrenProvider, Node: UIKeyExprTreeNode, Weight> where Children::Assoc: IChildren + 'a, diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/iters/intersection.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/iters/intersection.rs index 34902810f0..dccd571911 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/iters/intersection.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/iters/intersection.rs @@ -12,10 +12,12 @@ // ZettaScale Zenoh Team, // -use crate::keyexpr_tree::*; use alloc::vec::Vec; + use zenoh_result::unlikely; +use crate::keyexpr_tree::*; + struct StackFrame<'a, Children: IChildrenProvider, Node: UIKeyExprTreeNode, Weight> where Children::Assoc: IChildren + 'a, diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/iters/tree_iter.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/iters/tree_iter.rs index 666f0cb2c2..05afae3885 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/iters/tree_iter.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/iters/tree_iter.rs @@ -12,9 +12,8 @@ // ZettaScale Zenoh Team, // -use core::num::NonZeroUsize; - use alloc::vec::Vec; +use core::num::NonZeroUsize; use crate::keyexpr_tree::*; pub struct TreeIter<'a, Children: IChildrenProvider, Node: UIKeyExprTreeNode, Weight> diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/test.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/test.rs index fc2372a67b..ac3d15c6ec 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/test.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/test.rs @@ -12,24 +12,25 @@ // ZettaScale Zenoh Team, // -use crate::fuzzer::KeyExprFuzzer; use alloc::vec::Vec; -use rand::Rng; - -use super::{ - impls::{KeyedSetProvider, VecSetProvider}, - *, -}; use core::{ convert::{TryFrom, TryInto}, fmt::Debug, ops::Deref, }; -#[cfg(not(feature = "std"))] -use hashbrown::HashMap; #[cfg(feature = "std")] use std::collections::HashMap; +#[cfg(not(feature = "std"))] +use hashbrown::HashMap; +use rand::Rng; + +use super::{ + impls::{KeyedSetProvider, VecSetProvider}, + *, +}; +use crate::fuzzer::KeyExprFuzzer; + fn insert<'a, K: TryInto<&'a keyexpr>, V: Clone + PartialEq + Debug + 'static>( ketree: &mut KeBoxTree, map: &mut HashMap>, diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/traits/default_impls.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/traits/default_impls.rs index e6def16608..6a043ccda0 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/traits/default_impls.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/traits/default_impls.rs @@ -13,6 +13,7 @@ // use alloc::{boxed::Box, sync::Arc}; + use token_cell::prelude::{TokenCell, TokenCellTrait, TokenTrait}; use super::*; diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs index dd06cf14b8..03a97f5063 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs @@ -12,8 +12,9 @@ // ZettaScale Zenoh Team, // -use crate::{keyexpr, OwnedKeyExpr}; use alloc::boxed::Box; + +use crate::{keyexpr, OwnedKeyExpr}; pub mod default_impls; /// The basic immutable methods of all KeTrees. 
diff --git a/commons/zenoh-protocol/src/common/extension.rs b/commons/zenoh-protocol/src/common/extension.rs index f61df61cc6..1d9ff41d50 100644 --- a/commons/zenoh-protocol/src/common/extension.rs +++ b/commons/zenoh-protocol/src/common/extension.rs @@ -15,6 +15,7 @@ use core::{ convert::TryFrom, fmt::{self, Debug}, }; + use zenoh_buffers::ZBuf; /// # Zenoh extensions diff --git a/commons/zenoh-protocol/src/core/cowstr.rs b/commons/zenoh-protocol/src/core/cowstr.rs index 209d020f40..b31c1c4a5d 100644 --- a/commons/zenoh-protocol/src/core/cowstr.rs +++ b/commons/zenoh-protocol/src/core/cowstr.rs @@ -12,8 +12,10 @@ // ZettaScale Zenoh Team, // use alloc::{borrow::ToOwned, boxed::Box, string::String, vec::Vec}; -use core::fmt::{Debug, Display, Formatter}; -use core::num::NonZeroUsize; +use core::{ fmt::{Debug, Display, Formatter}, num::NonZeroUsize, }; enum CowStrInner<'a> { Borrowed(&'a str), diff --git a/commons/zenoh-protocol/src/core/encoding.rs b/commons/zenoh-protocol/src/core/encoding.rs index 70afdbf143..e58088b581 100644 --- a/commons/zenoh-protocol/src/core/encoding.rs +++ b/commons/zenoh-protocol/src/core/encoding.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use core::fmt::Debug; + use zenoh_buffers::ZSlice; pub type EncodingId = u16; diff --git a/commons/zenoh-protocol/src/core/endpoint.rs b/commons/zenoh-protocol/src/core/endpoint.rs index a61fdd8e89..8b2c4ad01c 100644 --- a/commons/zenoh-protocol/src/core/endpoint.rs +++ b/commons/zenoh-protocol/src/core/endpoint.rs @@ -11,11 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{locator::*, parameters::Parameters}; use alloc::{borrow::ToOwned, format, string::String}; use core::{borrow::Borrow, convert::TryFrom, fmt, str::FromStr}; + use zenoh_result::{bail, zerror, Error as ZError, ZResult}; +use super::{locator::*, parameters::Parameters}; + // Parsing chars pub const PROTO_SEPARATOR: char = '/'; pub const METADATA_SEPARATOR: char = '?'; diff --git a/commons/zenoh-protocol/src/core/locator.rs b/commons/zenoh-protocol/src/core/locator.rs index 50b909b12f..14f899e7c6 100644 --- a/commons/zenoh-protocol/src/core/locator.rs +++ b/commons/zenoh-protocol/src/core/locator.rs @@ -11,11 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::endpoint::*; use alloc::{borrow::ToOwned, string::String}; use core::{convert::TryFrom, fmt, hash::Hash, str::FromStr}; + use zenoh_result::{Error as ZError, ZResult}; +use super::endpoint::*; + /// A string that respects the [`Locator`] canon form: `<proto>/
<address>[?<metadata>]`. /// /// `<metadata>` is of the form `<key1>=<value1>;...;<keyN>=<valueN>` where keys are alphabetically sorted. diff --git a/commons/zenoh-protocol/src/core/mod.rs b/commons/zenoh-protocol/src/core/mod.rs index 0920d55d01..9f10cab391 100644 --- a/commons/zenoh-protocol/src/core/mod.rs +++ b/commons/zenoh-protocol/src/core/mod.rs @@ -23,6 +23,7 @@ use core::{ hash::Hash, str::FromStr, }; + pub use uhlc::{Timestamp, NTP64}; use zenoh_keyexpr::OwnedKeyExpr; use zenoh_result::{bail, zerror}; @@ -33,7 +34,6 @@ pub type TimestampId = uhlc::ID; /// Constants and helpers for zenoh `whatami` flags. pub mod whatami; pub use whatami::*; - pub use zenoh_keyexpr::key_expr; pub mod wire_expr; diff --git a/commons/zenoh-protocol/src/core/properties.rs b/commons/zenoh-protocol/src/core/properties.rs index a4c2c35197..5264288448 100644 --- a/commons/zenoh-protocol/src/core/properties.rs +++ b/commons/zenoh-protocol/src/core/properties.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::parameters::{Parameters, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR}; use alloc::{ borrow::Cow, string::{String, ToString}, @@ -20,6 +19,8 @@ use core::{borrow::Borrow, fmt}; #[cfg(feature = "std")] use std::collections::HashMap; +use super::parameters::{Parameters, FIELD_SEPARATOR, LIST_SEPARATOR, VALUE_SEPARATOR}; + /// A map of key/value (String,String) properties. /// It can be parsed from a String, using `;` or `<newline>` as separator between each properties /// and `=` as separator between a key and its value. Keys and values are trimed. diff --git a/commons/zenoh-protocol/src/core/resolution.rs b/commons/zenoh-protocol/src/core/resolution.rs index 093fd33bb4..fb16a5c713 100644 --- a/commons/zenoh-protocol/src/core/resolution.rs +++ b/commons/zenoh-protocol/src/core/resolution.rs @@ -11,11 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{network::RequestId, transport::TransportSn}; use alloc::string::String; use core::{fmt, str::FromStr}; + use zenoh_result::{bail, ZError}; +use crate::{network::RequestId, transport::TransportSn}; + #[repr(u8)] // The value represents the 2-bit encoded value #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] diff --git a/commons/zenoh-protocol/src/core/whatami.rs b/commons/zenoh-protocol/src/core/whatami.rs index 6aacb0d356..10c5b42c78 100644 --- a/commons/zenoh-protocol/src/core/whatami.rs +++ b/commons/zenoh-protocol/src/core/whatami.rs @@ -12,8 +12,9 @@ // ZettaScale Zenoh Team, // use alloc::string::String; -use const_format::formatcp; use core::{convert::TryFrom, fmt, num::NonZeroU8, ops::BitOr, str::FromStr}; + +use const_format::formatcp; use zenoh_result::{bail, ZError}; #[repr(u8)] diff --git a/commons/zenoh-protocol/src/core/wire_expr.rs b/commons/zenoh-protocol/src/core/wire_expr.rs index a66b1aa212..9f5c432665 100644 --- a/commons/zenoh-protocol/src/core/wire_expr.rs +++ b/commons/zenoh-protocol/src/core/wire_expr.rs @@ -18,6 +18,7 @@ use alloc::{ string::{String, ToString}, }; use core::{convert::TryInto, fmt, sync::atomic::AtomicU16}; + use zenoh_keyexpr::{keyexpr, OwnedKeyExpr}; use zenoh_result::{bail, ZResult}; diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index 9a41f42e56..a5373cd5f4 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -11,19 +11,21 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{ common::{imsg, ZExtZ64, ZExtZBuf}, core::{ExprId, Reliability, WireExpr}, network::Mapping, zextz64,
zextzbuf, -}; use alloc::borrow::Cow; + pub use common::*; pub use keyexpr::*; pub use queryable::*; pub use subscriber::*; pub use token::*; +use crate::{ + common::{imsg, ZExtZ64, ZExtZBuf}, + core::{ExprId, Reliability, WireExpr}, + network::Mapping, + zextz64, zextzbuf, +}; + pub mod flag { pub const I: u8 = 1 << 5; // 0x20 Interest if I==1 then the declare is in a response to an Interest with future==false // pub const X: u8 = 1 << 6; // 0x40 Reserved @@ -288,9 +290,8 @@ pub mod keyexpr { } pub mod subscriber { - use crate::core::EntityId; - use super::*; + use crate::core::EntityId; pub type SubscriberId = EntityId; @@ -448,9 +449,8 @@ pub mod subscriber { } pub mod queryable { - use crate::core::EntityId; - use super::*; + use crate::core::EntityId; pub type QueryableId = EntityId; diff --git a/commons/zenoh-protocol/src/network/interest.rs b/commons/zenoh-protocol/src/network/interest.rs index e7eb75787e..46797b72ee 100644 --- a/commons/zenoh-protocol/src/network/interest.rs +++ b/commons/zenoh-protocol/src/network/interest.rs @@ -11,13 +11,14 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::imsg, core::WireExpr, network::Mapping}; use core::{ fmt::{self, Debug}, ops::{Add, AddAssign, Sub, SubAssign}, sync::atomic::AtomicU32, }; +use crate::{common::imsg, core::WireExpr, network::Mapping}; + pub type InterestId = u32; pub mod flag { diff --git a/commons/zenoh-protocol/src/network/mod.rs b/commons/zenoh-protocol/src/network/mod.rs index 5a0635c9e0..952fe74e89 100644 --- a/commons/zenoh-protocol/src/network/mod.rs +++ b/commons/zenoh-protocol/src/network/mod.rs @@ -217,11 +217,12 @@ impl From for NetworkMessage { // Extensions pub mod ext { + use core::fmt; + use crate::{ common::{imsg, ZExtZ64}, core::{CongestionControl, EntityId, Priority, ZenohId}, }; - use core::fmt; /// ```text /// 7 6 5 4 3 2 1 0 diff --git a/commons/zenoh-protocol/src/network/request.rs b/commons/zenoh-protocol/src/network/request.rs index ff978744e8..09e8e6b2b6 100644 --- a/commons/zenoh-protocol/src/network/request.rs +++ b/commons/zenoh-protocol/src/network/request.rs @@ -11,9 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{core::WireExpr, zenoh::RequestBody}; use core::sync::atomic::AtomicU32; +use crate::{core::WireExpr, zenoh::RequestBody}; + /// The resolution of a RequestId pub type RequestId = u32; pub type AtomicRequestId = AtomicU32; @@ -64,11 +65,12 @@ pub struct Request { } pub mod ext { + use core::{num::NonZeroU32, time::Duration}; + use crate::{ common::{ZExtZ64, ZExtZBuf}, zextz64, zextzbuf, }; - use core::{num::NonZeroU32, time::Duration}; pub type QoS = zextz64!(0x1, false); pub type QoSType = crate::network::ext::QoSType<{ QoS::ID }>; diff --git a/commons/zenoh-protocol/src/scouting/hello.rs b/commons/zenoh-protocol/src/scouting/hello.rs index 562e2fb8c4..62ea915e5a 100644 --- a/commons/zenoh-protocol/src/scouting/hello.rs +++ b/commons/zenoh-protocol/src/scouting/hello.rs @@ -11,10 +11,11 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::core::{Locator, WhatAmI, ZenohId}; use alloc::vec::Vec; use core::fmt; +use crate::core::{Locator, WhatAmI, ZenohId}; + /// # Hello message /// /// The [`Hello`] message is used to advertise the locators a zenoh node is reachable at. 
diff --git a/commons/zenoh-protocol/src/transport/fragment.rs b/commons/zenoh-protocol/src/transport/fragment.rs index 3e80c9cfbf..0a1df1fdf5 100644 --- a/commons/zenoh-protocol/src/transport/fragment.rs +++ b/commons/zenoh-protocol/src/transport/fragment.rs @@ -11,9 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // +use zenoh_buffers::ZSlice; + use crate::core::Reliability; pub use crate::transport::TransportSn; -use zenoh_buffers::ZSlice; /// # Fragment message /// diff --git a/commons/zenoh-protocol/src/transport/frame.rs b/commons/zenoh-protocol/src/transport/frame.rs index 184784f9f1..02a4ead48f 100644 --- a/commons/zenoh-protocol/src/transport/frame.rs +++ b/commons/zenoh-protocol/src/transport/frame.rs @@ -11,9 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{core::Reliability, network::NetworkMessage, transport::TransportSn}; use alloc::vec::Vec; +use crate::{core::Reliability, network::NetworkMessage, transport::TransportSn}; + /// # Frame message /// /// The [`Frame`] message is used to transmit one ore more complete serialized diff --git a/commons/zenoh-protocol/src/transport/init.rs b/commons/zenoh-protocol/src/transport/init.rs index 7e86d17af2..b1febac4b5 100644 --- a/commons/zenoh-protocol/src/transport/init.rs +++ b/commons/zenoh-protocol/src/transport/init.rs @@ -11,11 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // +use zenoh_buffers::ZSlice; + use crate::{ core::{Resolution, WhatAmI, ZenohId}, transport::BatchSize, }; -use zenoh_buffers::ZSlice; /// # Init message /// @@ -158,9 +159,10 @@ pub mod ext { impl InitSyn { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::common::{ZExtUnit, ZExtZBuf}; use rand::Rng; + use crate::common::{ZExtUnit, ZExtZBuf}; + let mut rng = rand::thread_rng(); let version: u8 = rng.gen(); @@ -213,9 +215,10 @@ pub struct InitAck { impl InitAck { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::common::{ZExtUnit, ZExtZBuf}; use rand::Rng; + use crate::common::{ZExtUnit, ZExtZBuf}; + let mut rng = rand::thread_rng(); let version: u8 = rng.gen(); diff --git a/commons/zenoh-protocol/src/transport/join.rs b/commons/zenoh-protocol/src/transport/join.rs index a5cf1422a6..9918de6acf 100644 --- a/commons/zenoh-protocol/src/transport/join.rs +++ b/commons/zenoh-protocol/src/transport/join.rs @@ -11,11 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // +use core::time::Duration; + use crate::{ core::{Priority, Resolution, WhatAmI, ZenohId}, transport::{BatchSize, PrioritySn}, }; -use core::time::Duration; /// # Join message /// @@ -115,9 +116,10 @@ pub struct Join { // Extensions pub mod ext { + use alloc::boxed::Box; + use super::{Priority, PrioritySn}; use crate::{common::ZExtZBuf, zextzbuf}; - use alloc::boxed::Box; /// # QoS extension /// Used to announce next sn when QoS is enabled @@ -132,9 +134,10 @@ pub mod ext { impl Join { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::common::ZExtZBuf; use rand::Rng; + use crate::common::ZExtZBuf; + let mut rng = rand::thread_rng(); let version: u8 = rng.gen(); diff --git a/commons/zenoh-protocol/src/transport/open.rs b/commons/zenoh-protocol/src/transport/open.rs index c643286193..8c2e1429ec 100644 --- a/commons/zenoh-protocol/src/transport/open.rs +++ b/commons/zenoh-protocol/src/transport/open.rs @@ -11,10 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::transport::TransportSn; use core::time::Duration; + use zenoh_buffers::ZSlice; +use crate::transport::TransportSn; + /// # Open message /// /// After having 
succesfully complete the [`super::InitSyn`]-[`super::InitAck`] message exchange, @@ -88,15 +90,14 @@ pub struct OpenSyn { // Extensions pub mod ext { - use crate::{ - common::{ZExtUnit, ZExtZBuf}, - zextunit, zextzbuf, - }; - #[cfg(feature = "shared-memory")] use crate::common::ZExtZ64; #[cfg(feature = "shared-memory")] use crate::zextz64; + use crate::{ + common::{ZExtUnit, ZExtZBuf}, + zextunit, zextzbuf, + }; /// # QoS extension /// Used to negotiate the use of QoS @@ -128,11 +129,11 @@ pub mod ext { impl OpenSyn { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::common::{ZExtUnit, ZExtZBuf}; use rand::Rng; #[cfg(feature = "shared-memory")] use crate::common::ZExtZ64; + use crate::common::{ZExtUnit, ZExtZBuf}; const MIN: usize = 32; const MAX: usize = 1_024; @@ -186,11 +187,11 @@ pub struct OpenAck { impl OpenAck { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::common::{ZExtUnit, ZExtZBuf}; use rand::Rng; #[cfg(feature = "shared-memory")] use crate::common::ZExtZ64; + use crate::common::{ZExtUnit, ZExtZBuf}; let mut rng = rand::thread_rng(); diff --git a/commons/zenoh-protocol/src/zenoh/del.rs b/commons/zenoh-protocol/src/zenoh/del.rs index 84fec5bc08..4723cd5415 100644 --- a/commons/zenoh-protocol/src/zenoh/del.rs +++ b/commons/zenoh-protocol/src/zenoh/del.rs @@ -11,10 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::common::ZExtUnknown; use alloc::vec::Vec; + use uhlc::Timestamp; +use crate::common::ZExtUnknown; + /// # Put message /// /// ```text @@ -62,8 +64,9 @@ pub mod ext { impl Del { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::{common::iext, core::ZenohId}; use rand::Rng; + + use crate::{common::iext, core::ZenohId}; let mut rng = rand::thread_rng(); let timestamp = rng.gen_bool(0.5).then_some({ diff --git a/commons/zenoh-protocol/src/zenoh/err.rs b/commons/zenoh-protocol/src/zenoh/err.rs index b6aa5f4954..b8808d96d7 100644 --- a/commons/zenoh-protocol/src/zenoh/err.rs +++ b/commons/zenoh-protocol/src/zenoh/err.rs @@ -11,10 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::ZExtUnknown, core::Encoding}; use alloc::vec::Vec; + use zenoh_buffers::ZBuf; +use crate::{common::ZExtUnknown, core::Encoding}; + /// # Err message /// /// ```text @@ -71,8 +73,9 @@ pub mod ext { impl Err { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::common::iext; use rand::Rng; + + use crate::common::iext; let mut rng = rand::thread_rng(); let encoding = Encoding::rand(); diff --git a/commons/zenoh-protocol/src/zenoh/mod.rs b/commons/zenoh-protocol/src/zenoh/mod.rs index 7bca48f3ba..af9ba853f5 100644 --- a/commons/zenoh-protocol/src/zenoh/mod.rs +++ b/commons/zenoh-protocol/src/zenoh/mod.rs @@ -17,13 +17,14 @@ pub mod put; pub mod query; pub mod reply; -use crate::core::Encoding; pub use del::Del; pub use err::Err; pub use put::Put; pub use query::{Consolidation, Query}; pub use reply::Reply; +use crate::core::Encoding; + pub mod id { pub const OAM: u8 = 0x00; pub const PUT: u8 = 0x01; diff --git a/commons/zenoh-protocol/src/zenoh/put.rs b/commons/zenoh-protocol/src/zenoh/put.rs index 14674e9ad9..ef0a71db09 100644 --- a/commons/zenoh-protocol/src/zenoh/put.rs +++ b/commons/zenoh-protocol/src/zenoh/put.rs @@ -11,11 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::ZExtUnknown, core::Encoding}; use alloc::vec::Vec; + use uhlc::Timestamp; use zenoh_buffers::ZBuf; +use crate::{common::ZExtUnknown, core::Encoding}; + /// # Put message /// /// ```text @@ -80,8 +82,9 @@ pub mod 
ext { impl Put { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::{common::iext, core::ZenohId}; use rand::Rng; + + use crate::{common::iext, core::ZenohId}; let mut rng = rand::thread_rng(); let timestamp = rng.gen_bool(0.5).then_some({ diff --git a/commons/zenoh-protocol/src/zenoh/query.rs b/commons/zenoh-protocol/src/zenoh/query.rs index f1baaebe20..988447b835 100644 --- a/commons/zenoh-protocol/src/zenoh/query.rs +++ b/commons/zenoh-protocol/src/zenoh/query.rs @@ -11,9 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::common::ZExtUnknown; use alloc::{string::String, vec::Vec}; +use crate::common::ZExtUnknown; + /// The kind of consolidation. #[repr(u8)] #[derive(Debug, Default, Clone, PartialEq, Eq, Copy)] @@ -108,11 +109,12 @@ pub mod ext { impl Query { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::common::iext; use rand::{ distributions::{Alphanumeric, DistString}, Rng, }; + + use crate::common::iext; let mut rng = rand::thread_rng(); const MIN: usize = 2; diff --git a/commons/zenoh-protocol/src/zenoh/reply.rs b/commons/zenoh-protocol/src/zenoh/reply.rs index 7cbab4ca0a..f29521a4a9 100644 --- a/commons/zenoh-protocol/src/zenoh/reply.rs +++ b/commons/zenoh-protocol/src/zenoh/reply.rs @@ -11,11 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // +use alloc::vec::Vec; + use crate::{ common::ZExtUnknown, zenoh::{query::Consolidation, PushBody}, }; -use alloc::vec::Vec; /// # Reply message /// diff --git a/commons/zenoh-result/src/lib.rs b/commons/zenoh-result/src/lib.rs index 60148c763f..79de74f4eb 100644 --- a/commons/zenoh-result/src/lib.rs +++ b/commons/zenoh-result/src/lib.rs @@ -20,9 +20,10 @@ #![cfg_attr(not(feature = "std"), no_std)] extern crate alloc; -use anyhow::Error as AnyError; use core::fmt; +use anyhow::Error as AnyError; + #[cold] pub const fn cold() {} pub const fn likely(b: bool) -> bool { diff --git a/commons/zenoh-runtime/src/lib.rs b/commons/zenoh-runtime/src/lib.rs index dcd46744e6..9c5af8a107 100644 --- a/commons/zenoh-runtime/src/lib.rs +++ b/commons/zenoh-runtime/src/lib.rs @@ -12,8 +12,6 @@ // ZettaScale Zenoh Team, // use core::panic; -use lazy_static::lazy_static; -use serde::Deserialize; use std::{ borrow::Borrow, collections::HashMap, @@ -26,6 +24,9 @@ use std::{ }, time::Duration, }; + +use lazy_static::lazy_static; +use serde::Deserialize; use tokio::runtime::{Handle, Runtime, RuntimeFlavor}; use zenoh_macros::{GenericRuntimeParam, RegisterParam}; use zenoh_result::ZResult as Result; diff --git a/commons/zenoh-shm/src/api/client/shared_memory_client.rs b/commons/zenoh-shm/src/api/client/shared_memory_client.rs index abc7221300..dd3cf5db12 100644 --- a/commons/zenoh-shm/src/api/client/shared_memory_client.rs +++ b/commons/zenoh-shm/src/api/client/shared_memory_client.rs @@ -12,15 +12,12 @@ // ZettaScale Zenoh Team, // -use std::fmt::Debug; - -use std::sync::Arc; +use std::{fmt::Debug, sync::Arc}; use zenoh_result::ZResult; -use crate::api::common::types::SegmentID; - use super::shared_memory_segment::SharedMemorySegment; +use crate::api::common::types::SegmentID; /// SharedMemoryClient - client factory implementation for particular shared memory protocol #[zenoh_macros::unstable_doc] diff --git a/commons/zenoh-shm/src/api/client/shared_memory_segment.rs b/commons/zenoh-shm/src/api/client/shared_memory_segment.rs index 88eaf8761f..e3aaf9ba39 100644 --- a/commons/zenoh-shm/src/api/client/shared_memory_segment.rs +++ b/commons/zenoh-shm/src/api/client/shared_memory_segment.rs @@ -12,9 +12,7 @@ // ZettaScale 
Zenoh Team, // -use std::fmt::Debug; - -use std::sync::atomic::AtomicPtr; +use std::{fmt::Debug, sync::atomic::AtomicPtr}; use zenoh_result::ZResult; diff --git a/commons/zenoh-shm/src/api/client_storage/mod.rs b/commons/zenoh-shm/src/api/client_storage/mod.rs index 0ce1a8af11..7b78c23182 100644 --- a/commons/zenoh-shm/src/api/client_storage/mod.rs +++ b/commons/zenoh-shm/src/api/client_storage/mod.rs @@ -12,26 +12,27 @@ // ZettaScale Zenoh Team, // -use lazy_static::lazy_static; use std::{ collections::HashMap, sync::{Arc, RwLock}, }; +use lazy_static::lazy_static; use zenoh_result::{bail, ZResult}; -use crate::api::{ - client::{ - shared_memory_client::SharedMemoryClient, shared_memory_segment::SharedMemorySegment, - }, - common::types::ProtocolID, - protocol_implementations::posix::{ - posix_shared_memory_client::PosixSharedMemoryClient, protocol_id::POSIX_PROTOCOL_ID, +use crate::{ + api::{ + client::{ + shared_memory_client::SharedMemoryClient, shared_memory_segment::SharedMemorySegment, + }, + common::types::ProtocolID, + protocol_implementations::posix::{ + posix_shared_memory_client::PosixSharedMemoryClient, protocol_id::POSIX_PROTOCOL_ID, + }, }, + reader::{ClientStorage, GlobalDataSegmentID}, }; -use crate::reader::{ClientStorage, GlobalDataSegmentID}; - lazy_static! { /// A global lazily-initialized SHM client storage. /// When initialized, contains default client set, diff --git a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_client.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_client.rs index 0184f50036..5684b0b15f 100644 --- a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_client.rs +++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_client.rs @@ -16,6 +16,7 @@ use std::sync::Arc; use zenoh_result::ZResult; +use super::posix_shared_memory_segment::PosixSharedMemorySegment; use crate::api::{ client::{ shared_memory_client::SharedMemoryClient, shared_memory_segment::SharedMemorySegment, @@ -23,8 +24,6 @@ use crate::api::{ common::types::SegmentID, }; -use super::posix_shared_memory_segment::PosixSharedMemorySegment; - /// Client factory implementation for particular shared memory protocol #[zenoh_macros::unstable_doc] #[derive(Debug)] diff --git a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_provider_backend.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_provider_backend.rs index 89c1b91387..60e2a10891 100644 --- a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_provider_backend.rs +++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_provider_backend.rs @@ -25,6 +25,7 @@ use std::{ use zenoh_core::zlock; use zenoh_result::ZResult; +use super::posix_shared_memory_segment::PosixSharedMemorySegment; use crate::api::{ common::types::ChunkID, provider::{ @@ -34,8 +35,6 @@ use crate::api::{ }, }; -use super::posix_shared_memory_segment::PosixSharedMemorySegment; - // TODO: MIN_FREE_CHUNK_SIZE limitation is made to reduce memory fragmentation and lower // the CPU time needed to defragment() - that's reasonable, and there is additional thing here: // our SHM\zerocopy functionality outperforms common buffer transmission only starting from 1K diff --git a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_segment.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_segment.rs index 
eb49d141ca..3f74594ad0 100644 --- a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_segment.rs +++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shared_memory_segment.rs @@ -16,10 +16,13 @@ use std::sync::atomic::AtomicPtr; use zenoh_result::ZResult; -use crate::api::common::types::SegmentID; -use crate::api::{client::shared_memory_segment::SharedMemorySegment, common::types::ChunkID}; - -use crate::posix_shm::array::ArrayInSHM; +use crate::{ + api::{ + client::shared_memory_segment::SharedMemorySegment, + common::types::{ChunkID, SegmentID}, + }, + posix_shm::array::ArrayInSHM, +}; const POSIX_SHM_SEGMENT_PREFIX: &str = "posix_shm_provider_segment"; diff --git a/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs b/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs index c3b8128300..58109a699d 100644 --- a/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs +++ b/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs @@ -22,6 +22,11 @@ use std::{ use async_trait::async_trait; use zenoh_result::ZResult; +use super::{ + chunk::{AllocatedChunk, ChunkDescriptor}, + shared_memory_provider_backend::SharedMemoryProviderBackend, + types::{AllocAlignment, BufAllocResult, ChunkAllocResult, MemoryLayout, ZAllocError}, +}; use crate::{ api::{common::types::ProtocolID, slice::zsliceshmmut::ZSliceShmMut}, header::{ @@ -38,12 +43,6 @@ use crate::{ SharedMemoryBuf, SharedMemoryBufInfo, }; -use super::{ - chunk::{AllocatedChunk, ChunkDescriptor}, - shared_memory_provider_backend::SharedMemoryProviderBackend, - types::{AllocAlignment, BufAllocResult, ChunkAllocResult, MemoryLayout, ZAllocError}, -}; - #[derive(Debug)] struct BusyChunk { descriptor: ChunkDescriptor, diff --git a/commons/zenoh-shm/src/api/provider/types.rs b/commons/zenoh-shm/src/api/provider/types.rs index 662482f567..ddf949ee75 100644 --- a/commons/zenoh-shm/src/api/provider/types.rs +++ b/commons/zenoh-shm/src/api/provider/types.rs @@ -16,9 +16,8 @@ use std::fmt::Display; use zenoh_result::{bail, ZResult}; -use crate::api::slice::zsliceshmmut::ZSliceShmMut; - use super::chunk::AllocatedChunk; +use crate::api::slice::zsliceshmmut::ZSliceShmMut; /// Allocation errors /// diff --git a/commons/zenoh-shm/src/api/slice/zsliceshm.rs b/commons/zenoh-shm/src/api/slice/zsliceshm.rs index 86f4395ebb..b2ba611b3c 100644 --- a/commons/zenoh-shm/src/api/slice/zsliceshm.rs +++ b/commons/zenoh-shm/src/api/slice/zsliceshm.rs @@ -20,9 +20,8 @@ use std::{ use zenoh_buffers::{ZBuf, ZSlice}; -use crate::SharedMemoryBuf; - use super::{traits::SHMBuf, zsliceshmmut::zsliceshmmut}; +use crate::SharedMemoryBuf; /// An immutable SHM slice #[zenoh_macros::unstable_doc] diff --git a/commons/zenoh-shm/src/api/slice/zsliceshmmut.rs b/commons/zenoh-shm/src/api/slice/zsliceshmmut.rs index 62823785da..d866e4173e 100644 --- a/commons/zenoh-shm/src/api/slice/zsliceshmmut.rs +++ b/commons/zenoh-shm/src/api/slice/zsliceshmmut.rs @@ -17,12 +17,11 @@ use std::borrow::{Borrow, BorrowMut}; use zenoh_buffers::{ZBuf, ZSlice}; -use crate::SharedMemoryBuf; - use super::{ traits::{SHMBuf, SHMBufMut}, zsliceshm::{zsliceshm, ZSliceShm}, }; +use crate::SharedMemoryBuf; /// A mutable SHM slice #[zenoh_macros::unstable_doc] diff --git a/commons/zenoh-shm/src/header/segment.rs b/commons/zenoh-shm/src/header/segment.rs index e36e54a233..ab2353c35d 100644 --- a/commons/zenoh-shm/src/header/segment.rs +++ b/commons/zenoh-shm/src/header/segment.rs @@ -14,12 +14,11 @@ use zenoh_result::ZResult; -use 
crate::posix_shm::array::ArrayInSHM; - use super::{ chunk_header::ChunkHeaderType, descriptor::{HeaderIndex, HeaderSegmentID}, }; +use crate::posix_shm::array::ArrayInSHM; const HEADER_SEGMENT_PREFIX: &str = "header"; diff --git a/commons/zenoh-shm/src/lib.rs b/commons/zenoh-shm/src/lib.rs index abcdd558fb..316477d26e 100644 --- a/commons/zenoh-shm/src/lib.rs +++ b/commons/zenoh-shm/src/lib.rs @@ -11,8 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use api::{common::types::ProtocolID, provider::chunk::ChunkDescriptor}; -use header::descriptor::{HeaderDescriptor, OwnedHeaderDescriptor}; use std::{ any::Any, sync::{ @@ -20,6 +18,9 @@ use std::{ Arc, }, }; + +use api::{common::types::ProtocolID, provider::chunk::ChunkDescriptor}; +use header::descriptor::{HeaderDescriptor, OwnedHeaderDescriptor}; use watchdog::{confirmator::ConfirmedDescriptor, descriptor::Descriptor}; use zenoh_buffers::ZSliceBuffer; diff --git a/commons/zenoh-shm/src/watchdog/segment.rs b/commons/zenoh-shm/src/watchdog/segment.rs index b4a273c01c..5943a10153 100644 --- a/commons/zenoh-shm/src/watchdog/segment.rs +++ b/commons/zenoh-shm/src/watchdog/segment.rs @@ -16,9 +16,8 @@ use std::sync::atomic::AtomicU64; use zenoh_result::ZResult; -use crate::posix_shm::array::ArrayInSHM; - use super::descriptor::SegmentID; +use crate::posix_shm::array::ArrayInSHM; const WATCHDOG_SEGMENT_PREFIX: &str = "watchdog"; diff --git a/commons/zenoh-sync/src/condition.rs b/commons/zenoh-sync/src/condition.rs index 098aa05411..99ba6d4ca2 100644 --- a/commons/zenoh-sync/src/condition.rs +++ b/commons/zenoh-sync/src/condition.rs @@ -11,8 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use event_listener::{Event, EventListener}; use std::{pin::Pin, sync::MutexGuard}; + +use event_listener::{Event, EventListener}; use tokio::sync::MutexGuard as AysncMutexGuard; pub type ConditionWaiter = Pin>; diff --git a/commons/zenoh-sync/src/fifo_queue.rs b/commons/zenoh-sync/src/fifo_queue.rs index e0ce57cb36..44bc2a5b17 100644 --- a/commons/zenoh-sync/src/fifo_queue.rs +++ b/commons/zenoh-sync/src/fifo_queue.rs @@ -11,11 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::Condition; use tokio::sync::Mutex; use zenoh_collections::RingBuffer; use zenoh_core::zasynclock; +use crate::Condition; + pub struct FifoQueue { not_empty: Condition, not_full: Condition, diff --git a/commons/zenoh-sync/src/lib.rs b/commons/zenoh-sync/src/lib.rs index 419246dc9d..20e95d2bb8 100644 --- a/commons/zenoh-sync/src/lib.rs +++ b/commons/zenoh-sync/src/lib.rs @@ -17,10 +17,13 @@ //! This module is intended for Zenoh's internal use. //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; + use futures::FutureExt; -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; pub mod fifo_queue; pub use fifo_queue::*; diff --git a/commons/zenoh-sync/src/lifo_queue.rs b/commons/zenoh-sync/src/lifo_queue.rs index f29614d4b2..9fe541da36 100644 --- a/commons/zenoh-sync/src/lifo_queue.rs +++ b/commons/zenoh-sync/src/lifo_queue.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::sync::{Condvar, Mutex}; + use zenoh_collections::StackBuffer; use zenoh_core::zlock; diff --git a/commons/zenoh-sync/src/mvar.rs b/commons/zenoh-sync/src/mvar.rs index 1b4a90e1e2..f818b44071 100644 --- a/commons/zenoh-sync/src/mvar.rs +++ b/commons/zenoh-sync/src/mvar.rs @@ -11,11 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::Condition; use std::sync::atomic::{AtomicUsize, Ordering}; + use tokio::sync::Mutex; use zenoh_core::zasynclock; +use crate::Condition; + pub struct Mvar { inner: Mutex>, cond_put: Condition, @@ -96,9 +98,9 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn mvar() -> ZResult<()> { + use std::{sync::Arc, time::Duration}; + use super::Mvar; - use std::sync::Arc; - use std::time::Duration; const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/commons/zenoh-sync/src/object_pool.rs b/commons/zenoh-sync/src/object_pool.rs index 3386b2058b..ee6eed881b 100644 --- a/commons/zenoh-sync/src/object_pool.rs +++ b/commons/zenoh-sync/src/object_pool.rs @@ -11,15 +11,17 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::LifoQueue; use std::{ any::Any, fmt, ops::{Deref, DerefMut, Drop}, sync::{Arc, Weak}, }; + use zenoh_buffers::ZSliceBuffer; +use super::LifoQueue; + /// Provides a pool of pre-allocated objects that are automaticlaly reinserted into /// the pool when dropped. pub struct RecyclingObjectPool diff --git a/commons/zenoh-sync/src/signal.rs b/commons/zenoh-sync/src/signal.rs index 74dd3e5199..053f5a13aa 100644 --- a/commons/zenoh-sync/src/signal.rs +++ b/commons/zenoh-sync/src/signal.rs @@ -11,8 +11,11 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::sync::atomic::{AtomicBool, Ordering::*}; -use std::sync::Arc; +use std::sync::{ + atomic::{AtomicBool, Ordering::*}, + Arc, +}; + use tokio::sync::Semaphore; #[derive(Debug, Clone)] @@ -68,9 +71,10 @@ impl Default for Signal { #[cfg(test)] mod tests { - use super::*; use std::time::Duration; + use super::*; + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn signal_test() { let signal = Signal::new(); diff --git a/commons/zenoh-task/src/lib.rs b/commons/zenoh-task/src/lib.rs index d41eb50f34..7eab9d316f 100644 --- a/commons/zenoh-task/src/lib.rs +++ b/commons/zenoh-task/src/lib.rs @@ -18,12 +18,11 @@ //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) +use std::{future::Future, time::Duration}; + use futures::future::FutureExt; -use std::future::Future; -use std::time::Duration; use tokio::task::JoinHandle; -use tokio_util::sync::CancellationToken; -use tokio_util::task::TaskTracker; +use tokio_util::{sync::CancellationToken, task::TaskTracker}; use zenoh_core::{ResolveFuture, Wait}; use zenoh_runtime::ZRuntime; diff --git a/commons/zenoh-util/src/std_only/ffi/win.rs b/commons/zenoh-util/src/std_only/ffi/win.rs index 3a15871c20..7f0bbd986a 100644 --- a/commons/zenoh-util/src/std_only/ffi/win.rs +++ b/commons/zenoh-util/src/std_only/ffi/win.rs @@ -11,9 +11,11 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::io; -use std::mem; -use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; +use std::{ + io, mem, + net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}, +}; + use winapi::shared::{ws2def, ws2ipdef}; #[allow(clippy::many_single_char_names)] diff --git a/commons/zenoh-util/src/std_only/lib_loader.rs b/commons/zenoh-util/src/std_only/lib_loader.rs index 9c682e4343..d6b254eb35 100644 --- a/commons/zenoh-util/src/std_only/lib_loader.rs +++ b/commons/zenoh-util/src/std_only/lib_loader.rs @@ -11,11 +11,14 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{ + env::consts::{DLL_PREFIX, DLL_SUFFIX}, + ffi::OsString, + ops::Deref, + path::PathBuf, +}; + use libloading::Library; -use std::env::consts::{DLL_PREFIX, DLL_SUFFIX}; -use std::ffi::OsString; -use std::ops::Deref; -use std::path::PathBuf; use tracing::{debug, warn}; use zenoh_core::zconfigurable; use zenoh_result::{bail, ZResult}; diff --git a/commons/zenoh-util/src/std_only/net/mod.rs b/commons/zenoh-util/src/std_only/net/mod.rs index 83ab08d678..65b665d31b 100644 --- a/commons/zenoh-util/src/std_only/net/mod.rs +++ b/commons/zenoh-util/src/std_only/net/mod.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::net::{IpAddr, Ipv6Addr}; + use tokio::net::{TcpSocket, UdpSocket}; use zenoh_core::zconfigurable; #[cfg(unix)] @@ -78,9 +79,10 @@ pub fn get_interface(name: &str) -> ZResult> { #[cfg(windows)] { unsafe { - use crate::ffi; use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; + use crate::ffi; + let buffer = get_adapters_adresses(winapi::shared::ws2def::AF_INET)?; let mut next_iface = (buffer.as_ptr() as *mut IP_ADAPTER_ADDRESSES_LH).as_ref(); @@ -162,9 +164,10 @@ pub fn get_local_addresses(interface: Option<&str>) -> ZResult> { #[cfg(windows)] { unsafe { - use crate::ffi; use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; + use crate::ffi; + let buffer = get_adapters_adresses(winapi::shared::ws2def::AF_UNSPEC)?; let mut result = vec![]; @@ -242,9 +245,10 @@ pub fn get_unicast_addresses_of_interface(name: &str) -> ZResult> { #[cfg(windows)] { unsafe { - use crate::ffi; use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; + use crate::ffi; + let buffer = get_adapters_adresses(winapi::shared::ws2def::AF_INET)?; let mut addrs = vec![]; @@ -281,9 +285,10 @@ pub fn get_index_of_interface(addr: IpAddr) -> ZResult { #[cfg(windows)] { unsafe { - use crate::ffi; use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; + use crate::ffi; + let buffer = get_adapters_adresses(winapi::shared::ws2def::AF_INET)?; let mut next_iface = (buffer.as_ptr() as *mut IP_ADAPTER_ADDRESSES_LH).as_ref(); @@ -324,9 +329,10 @@ pub fn get_interface_names_by_addr(addr: IpAddr) -> ZResult> { { let mut result = vec![]; unsafe { - use crate::ffi; use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; + use crate::ffi; 
+ let buffer = get_adapters_adresses(winapi::shared::ws2def::AF_UNSPEC)?; if addr.is_unspecified() { diff --git a/commons/zenoh-util/src/std_only/time_range.rs b/commons/zenoh-util/src/std_only/time_range.rs index 9cfaf32655..51bff157ba 100644 --- a/commons/zenoh-util/src/std_only/time_range.rs +++ b/commons/zenoh-util/src/std_only/time_range.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // -use humantime::{format_rfc3339, parse_rfc3339_weak}; use std::{ convert::{TryFrom, TryInto}, fmt::Display, @@ -20,6 +19,8 @@ use std::{ str::FromStr, time::{Duration, SystemTime}, }; + +use humantime::{format_rfc3339, parse_rfc3339_weak}; use zenoh_result::{bail, zerror, ZError}; const U_TO_SECS: f64 = 0.000001; diff --git a/commons/zenoh-util/src/std_only/timer.rs b/commons/zenoh-util/src/std_only/timer.rs index 6e7dde065a..d18b9192a4 100644 --- a/commons/zenoh-util/src/std_only/timer.rs +++ b/commons/zenoh-util/src/std_only/timer.rs @@ -11,16 +11,19 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::prelude::*; -use async_std::sync::Mutex; -use async_std::task; +use std::{ + cmp::Ordering as ComparisonOrdering, + collections::BinaryHeap, + sync::{ + atomic::{AtomicBool, Ordering as AtomicOrdering}, + Arc, Weak, + }, + time::{Duration, Instant}, +}; + +use async_std::{prelude::*, sync::Mutex, task}; use async_trait::async_trait; use flume::{bounded, Receiver, RecvError, Sender}; -use std::cmp::Ordering as ComparisonOrdering; -use std::collections::BinaryHeap; -use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering}; -use std::sync::{Arc, Weak}; -use std::time::{Duration, Instant}; use zenoh_core::zconfigurable; zconfigurable! { @@ -296,12 +299,18 @@ impl Default for Timer { mod tests { #[test] fn timer() { - use super::{Timed, TimedEvent, Timer}; + use std::{ + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::{Duration, Instant}, + }; + use async_std::task; use async_trait::async_trait; - use std::sync::atomic::{AtomicUsize, Ordering}; - use std::sync::Arc; - use std::time::{Duration, Instant}; + + use super::{Timed, TimedEvent, Timer}; #[derive(Clone)] struct MyEvent { diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 6b6326ebcf..2b5ba011f6 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -11,8 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::Parser; use std::time::Duration; + +use clap::Parser; use zenoh::prelude::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_get_liveliness.rs b/examples/examples/z_get_liveliness.rs index 43747697b6..6a616bfa2d 100644 --- a/examples/examples/z_get_liveliness.rs +++ b/examples/examples/z_get_liveliness.rs @@ -11,8 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::Parser; use std::time::Duration; + +use clap::Parser; use zenoh::prelude::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index 81181f1a81..ad761bddd2 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -11,8 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::Parser; use std::time::{Duration, Instant}; + +use clap::Parser; use zenoh::prelude::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs index 7a7bd61580..d4c5b4f162 100644 --- a/examples/examples/z_ping_shm.rs +++ b/examples/examples/z_ping_shm.rs @@ -11,8 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::Parser; use 
std::time::{Duration, Instant}; + +use clap::Parser; use zenoh::prelude::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 7c2c9f2c65..0a2e4e09c1 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -11,8 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::Parser; use std::time::Duration; + +use clap::Parser; use zenoh::prelude::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 5eb4f9e96e..d047d63203 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -12,8 +12,9 @@ // ZettaScale Zenoh Team, // -use clap::Parser; use std::convert::TryInto; + +use clap::Parser; use zenoh::prelude::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index 55f211f111..1e13cefb2f 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -11,8 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::Parser; use std::time::Duration; + +use clap::Parser; use zenoh::prelude::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index 2b03e32d06..46ccfc8193 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -13,9 +13,10 @@ // #![recursion_limit = "256"] +use std::collections::HashMap; + use clap::Parser; use futures::select; -use std::collections::HashMap; use zenoh::prelude::*; use zenoh_examples::CommonArgs; diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index 5f5c77633f..9914539ed5 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -12,8 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::prelude::*; +use zenoh::{config::Config, prelude::*}; use zenoh_examples::CommonArgs; #[tokio::main] diff --git a/examples/examples/z_sub_thr.rs b/examples/examples/z_sub_thr.rs index 6913a7bf08..1006fdb434 100644 --- a/examples/examples/z_sub_thr.rs +++ b/examples/examples/z_sub_thr.rs @@ -11,8 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::Parser; use std::time::Instant; + +use clap::Parser; use zenoh::prelude::*; use zenoh_examples::CommonArgs; diff --git a/io/zenoh-link-commons/src/lib.rs b/io/zenoh-link-commons/src/lib.rs index 138726fd4f..5a41050e94 100644 --- a/io/zenoh-link-commons/src/lib.rs +++ b/io/zenoh-link-commons/src/lib.rs @@ -25,14 +25,14 @@ pub mod tls; mod unicast; use alloc::{borrow::ToOwned, boxed::Box, string::String, vec, vec::Vec}; -use async_trait::async_trait; use core::{cmp::PartialEq, fmt, hash::Hash}; + +use async_trait::async_trait; pub use listener::*; pub use multicast::*; use serde::Serialize; pub use unicast::*; -use zenoh_protocol::core::Locator; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{core::Locator, transport::BatchSize}; use zenoh_result::ZResult; /*************************************/ diff --git a/io/zenoh-link-commons/src/listener.rs b/io/zenoh-link-commons/src/listener.rs index be61e9cf89..48930a7a65 100644 --- a/io/zenoh-link-commons/src/listener.rs +++ b/io/zenoh-link-commons/src/listener.rs @@ -11,11 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{ + collections::HashMap, + net::{IpAddr, SocketAddr}, + sync::{Arc, RwLock}, +}; + use futures::Future; -use std::collections::HashMap; -use std::net::IpAddr; -use std::net::SocketAddr; -use std::sync::{Arc, 
RwLock}; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use zenoh_core::{zread, zwrite}; diff --git a/io/zenoh-link-commons/src/multicast.rs b/io/zenoh-link-commons/src/multicast.rs index ccfe6842c1..ee07c4eb58 100644 --- a/io/zenoh-link-commons/src/multicast.rs +++ b/io/zenoh-link-commons/src/multicast.rs @@ -12,12 +12,13 @@ // ZettaScale Zenoh Team, // use alloc::{borrow::Cow, boxed::Box, sync::Arc, vec::Vec}; -use async_trait::async_trait; use core::{ fmt, hash::{Hash, Hasher}, ops::Deref, }; + +use async_trait::async_trait; use zenoh_buffers::{reader::HasReader, writer::HasWriter}; use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_protocol::{ diff --git a/io/zenoh-link-commons/src/tls.rs b/io/zenoh-link-commons/src/tls.rs index 562b02c81e..427880b812 100644 --- a/io/zenoh-link-commons/src/tls.rs +++ b/io/zenoh-link-commons/src/tls.rs @@ -1,4 +1,5 @@ use alloc::vec::Vec; + use rustls::{ client::{ danger::{ServerCertVerified, ServerCertVerifier}, diff --git a/io/zenoh-link-commons/src/unicast.rs b/io/zenoh-link-commons/src/unicast.rs index 2bd1808acf..add4c3a27b 100644 --- a/io/zenoh-link-commons/src/unicast.rs +++ b/io/zenoh-link-commons/src/unicast.rs @@ -12,13 +12,14 @@ // ZettaScale Zenoh Team, // use alloc::{boxed::Box, string::String, sync::Arc, vec::Vec}; -use async_trait::async_trait; use core::{ fmt, hash::{Hash, Hasher}, ops::Deref, }; use std::net::SocketAddr; + +use async_trait::async_trait; use zenoh_protocol::{ core::{EndPoint, Locator}, transport::BatchSize, diff --git a/io/zenoh-link/src/lib.rs b/io/zenoh-link/src/lib.rs index 21f26ecf1b..7898cf087d 100644 --- a/io/zenoh-link/src/lib.rs +++ b/io/zenoh-link/src/lib.rs @@ -18,40 +18,41 @@ //! //! [Click here for Zenoh's documentation](../zenoh/index.html) use std::collections::HashMap; -use zenoh_config::Config; -use zenoh_result::{bail, ZResult}; +use zenoh_config::Config; +pub use zenoh_link_commons::*; +#[cfg(feature = "transport_quic")] +pub use zenoh_link_quic as quic; +#[cfg(feature = "transport_quic")] +use zenoh_link_quic::{ + LinkManagerUnicastQuic, QuicConfigurator, QuicLocatorInspector, QUIC_LOCATOR_PREFIX, +}; +#[cfg(feature = "transport_serial")] +pub use zenoh_link_serial as serial; +#[cfg(feature = "transport_serial")] +use zenoh_link_serial::{LinkManagerUnicastSerial, SerialLocatorInspector, SERIAL_LOCATOR_PREFIX}; #[cfg(feature = "transport_tcp")] pub use zenoh_link_tcp as tcp; #[cfg(feature = "transport_tcp")] use zenoh_link_tcp::{LinkManagerUnicastTcp, TcpLocatorInspector, TCP_LOCATOR_PREFIX}; - -#[cfg(feature = "transport_udp")] -pub use zenoh_link_udp as udp; -#[cfg(feature = "transport_udp")] -use zenoh_link_udp::{ - LinkManagerMulticastUdp, LinkManagerUnicastUdp, UdpLocatorInspector, UDP_LOCATOR_PREFIX, -}; - #[cfg(feature = "transport_tls")] pub use zenoh_link_tls as tls; #[cfg(feature = "transport_tls")] use zenoh_link_tls::{ LinkManagerUnicastTls, TlsConfigurator, TlsLocatorInspector, TLS_LOCATOR_PREFIX, }; - -#[cfg(feature = "transport_quic")] -pub use zenoh_link_quic as quic; -#[cfg(feature = "transport_quic")] -use zenoh_link_quic::{ - LinkManagerUnicastQuic, QuicConfigurator, QuicLocatorInspector, QUIC_LOCATOR_PREFIX, +#[cfg(feature = "transport_udp")] +pub use zenoh_link_udp as udp; +#[cfg(feature = "transport_udp")] +use zenoh_link_udp::{ + LinkManagerMulticastUdp, LinkManagerUnicastUdp, UdpLocatorInspector, UDP_LOCATOR_PREFIX, +}; +#[cfg(feature = "transport_unixpipe")] +pub use zenoh_link_unixpipe as unixpipe; +#[cfg(feature = "transport_unixpipe")] +use 
zenoh_link_unixpipe::{ + LinkManagerUnicastPipe, UnixPipeConfigurator, UnixPipeLocatorInspector, UNIXPIPE_LOCATOR_PREFIX, }; - -#[cfg(feature = "transport_ws")] -pub use zenoh_link_ws as ws; -#[cfg(feature = "transport_ws")] -use zenoh_link_ws::{LinkManagerUnicastWs, WsLocatorInspector, WS_LOCATOR_PREFIX}; - #[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] pub use zenoh_link_unixsock_stream as unixsock_stream; #[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] @@ -59,26 +60,16 @@ use zenoh_link_unixsock_stream::{ LinkManagerUnicastUnixSocketStream, UnixSockStreamLocatorInspector, UNIXSOCKSTREAM_LOCATOR_PREFIX, }; - -#[cfg(feature = "transport_serial")] -pub use zenoh_link_serial as serial; -#[cfg(feature = "transport_serial")] -use zenoh_link_serial::{LinkManagerUnicastSerial, SerialLocatorInspector, SERIAL_LOCATOR_PREFIX}; - -#[cfg(feature = "transport_unixpipe")] -pub use zenoh_link_unixpipe as unixpipe; -#[cfg(feature = "transport_unixpipe")] -use zenoh_link_unixpipe::{ - LinkManagerUnicastPipe, UnixPipeConfigurator, UnixPipeLocatorInspector, UNIXPIPE_LOCATOR_PREFIX, -}; - #[cfg(all(feature = "transport_vsock", target_os = "linux"))] pub use zenoh_link_vsock as vsock; #[cfg(all(feature = "transport_vsock", target_os = "linux"))] use zenoh_link_vsock::{LinkManagerUnicastVsock, VsockLocatorInspector, VSOCK_LOCATOR_PREFIX}; - -pub use zenoh_link_commons::*; +#[cfg(feature = "transport_ws")] +pub use zenoh_link_ws as ws; +#[cfg(feature = "transport_ws")] +use zenoh_link_ws::{LinkManagerUnicastWs, WsLocatorInspector, WS_LOCATOR_PREFIX}; pub use zenoh_protocol::core::{EndPoint, Locator}; +use zenoh_result::{bail, ZResult}; pub const PROTOCOLS: &[&str] = &[ #[cfg(feature = "transport_quic")] diff --git a/io/zenoh-links/zenoh-link-quic/src/lib.rs b/io/zenoh-links/zenoh-link-quic/src/lib.rs index a60f84c559..a7303a9622 100644 --- a/io/zenoh-links/zenoh-link-quic/src/lib.rs +++ b/io/zenoh-links/zenoh-link-quic/src/lib.rs @@ -18,7 +18,6 @@ //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) use async_trait::async_trait; - use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; use zenoh_protocol::{core::Locator, transport::BatchSize}; diff --git a/io/zenoh-links/zenoh-link-quic/src/unicast.rs b/io/zenoh-links/zenoh-link-quic/src/unicast.rs index 05d33dff49..a3b2687b6f 100644 --- a/io/zenoh-links/zenoh-link-quic/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-quic/src/unicast.rs @@ -12,17 +12,14 @@ // ZettaScale Zenoh Team, // -use crate::{ - config::*, - utils::{get_quic_addr, TlsClientConfig, TlsServerConfig}, - ALPN_QUIC_HTTP, QUIC_ACCEPT_THROTTLE_TIME, QUIC_DEFAULT_MTU, QUIC_LOCATOR_PREFIX, +use std::{ + fmt, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + sync::Arc, + time::Duration, }; + use async_trait::async_trait; -use std::fmt; -use std::net::IpAddr; -use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr}; -use std::sync::Arc; -use std::time::Duration; use tokio::sync::Mutex as AsyncMutex; use tokio_util::sync::CancellationToken; use zenoh_core::zasynclock; @@ -30,10 +27,18 @@ use zenoh_link_commons::{ get_ip_interface_names, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, }; -use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{ + core::{EndPoint, Locator}, + transport::BatchSize, +}; use zenoh_result::{bail, zerror, ZResult}; +use crate::{ + config::*, + utils::{get_quic_addr, TlsClientConfig, TlsServerConfig}, + ALPN_QUIC_HTTP, QUIC_ACCEPT_THROTTLE_TIME, QUIC_DEFAULT_MTU, QUIC_LOCATOR_PREFIX, +}; + pub struct LinkUnicastQuic { connection: quinn::Connection, src_addr: SocketAddr, diff --git a/io/zenoh-links/zenoh-link-quic/src/utils.rs b/io/zenoh-links/zenoh-link-quic/src/utils.rs index e7537bd658..1eb8f94380 100644 --- a/io/zenoh-links/zenoh-link-quic/src/utils.rs +++ b/io/zenoh-links/zenoh-link-quic/src/utils.rs @@ -11,30 +11,32 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::config::*; -use crate::verify::WebPkiVerifierAnyServerName; -use rustls::OwnedTrustAnchor; -use rustls::{ - server::AllowAnyAuthenticatedClient, version::TLS13, Certificate, ClientConfig, PrivateKey, - RootCertStore, ServerConfig, -}; -use rustls_pki_types::{CertificateDer, TrustAnchor}; -use secrecy::ExposeSecret; -use zenoh_link_commons::ConfigurationInspector; // use rustls_pki_types::{CertificateDer, PrivateKeyDer, TrustAnchor}; -use std::fs::File; -use std::io; -use std::net::SocketAddr; use std::{ + fs::File, + io, io::{BufReader, Cursor}, + net::SocketAddr, sync::Arc, }; + +use rustls::{ + server::AllowAnyAuthenticatedClient, version::TLS13, Certificate, ClientConfig, + OwnedTrustAnchor, PrivateKey, RootCertStore, ServerConfig, +}; +use rustls_pki_types::{CertificateDer, TrustAnchor}; +use secrecy::ExposeSecret; use webpki::anchor_from_trusted_cert; use zenoh_config::Config as ZenohConfig; -use zenoh_protocol::core::endpoint::{Address, Config}; -use zenoh_protocol::core::Parameters; +use zenoh_link_commons::ConfigurationInspector; +use zenoh_protocol::core::{ + endpoint::{Address, Config}, + Parameters, +}; use zenoh_result::{bail, zerror, ZError, ZResult}; +use crate::{config::*, verify::WebPkiVerifierAnyServerName}; + #[derive(Default, Clone, Copy, Debug)] pub struct TlsConfigurator; @@ -498,8 +500,7 @@ pub async fn get_quic_addr(address: &Address<'_>) -> ZResult { } pub fn base64_decode(data: &str) -> ZResult> { - use base64::engine::general_purpose; - use base64::Engine; + use 
base64::{engine::general_purpose, Engine}; Ok(general_purpose::STANDARD .decode(data) .map_err(|e| zerror!("Unable to perform base64 decoding: {e:?}"))?) diff --git a/io/zenoh-links/zenoh-link-quic/src/verify.rs b/io/zenoh-links/zenoh-link-quic/src/verify.rs index baa7864246..544d7c8a65 100644 --- a/io/zenoh-links/zenoh-link-quic/src/verify.rs +++ b/io/zenoh-links/zenoh-link-quic/src/verify.rs @@ -1,6 +1,6 @@ -use rustls::client::verify_server_cert_signed_by_trust_anchor; -use rustls::server::ParsedCertificate; use std::time::SystemTime; + +use rustls::{client::verify_server_cert_signed_by_trust_anchor, server::ParsedCertificate}; use tokio_rustls::rustls::{ client::{ServerCertVerified, ServerCertVerifier}, Certificate, RootCertStore, ServerName, diff --git a/io/zenoh-links/zenoh-link-serial/src/lib.rs b/io/zenoh-links/zenoh-link-serial/src/lib.rs index f7b0b7afeb..3d2ddcd0e6 100644 --- a/io/zenoh-links/zenoh-link-serial/src/lib.rs +++ b/io/zenoh-links/zenoh-link-serial/src/lib.rs @@ -19,13 +19,16 @@ //! [Click here for Zenoh's documentation](../zenoh/index.html) mod unicast; -use async_trait::async_trait; use std::str::FromStr; + +use async_trait::async_trait; pub use unicast::*; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; -use zenoh_protocol::core::{endpoint::Address, EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{ + core::{endpoint::Address, EndPoint, Locator}, + transport::BatchSize, +}; use zenoh_result::ZResult; // Maximum MTU (Serial PDU) in bytes. diff --git a/io/zenoh-links/zenoh-link-serial/src/unicast.rs b/io/zenoh-links/zenoh-link-serial/src/unicast.rs index 239ff6bb9d..ca4efacdc6 100644 --- a/io/zenoh-links/zenoh-link-serial/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-serial/src/unicast.rs @@ -12,35 +12,40 @@ // ZettaScale Zenoh Team, // +use std::{ + cell::UnsafeCell, + collections::HashMap, + fmt, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + time::Duration, +}; + use async_trait::async_trait; -use std::cell::UnsafeCell; -use std::collections::HashMap; -use std::fmt; -use std::sync::{ - atomic::{AtomicBool, Ordering}, - Arc, +use tokio::{ + sync::{Mutex as AsyncMutex, RwLock as AsyncRwLock}, + task::JoinHandle, }; -use std::time::Duration; -use tokio::sync::{Mutex as AsyncMutex, RwLock as AsyncRwLock}; -use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; +use z_serial::ZSerial; use zenoh_core::{zasynclock, zasyncread, zasyncwrite}; use zenoh_link_commons::{ ConstructibleLinkManagerUnicast, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; -use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{ + core::{EndPoint, Locator}, + transport::BatchSize, +}; use zenoh_result::{zerror, ZResult}; -use z_serial::ZSerial; - -use crate::get_exclusive; - use super::{ get_baud_rate, get_unix_path_as_string, SERIAL_ACCEPT_THROTTLE_TIME, SERIAL_DEFAULT_MTU, SERIAL_LOCATOR_PREFIX, }; +use crate::get_exclusive; struct LinkUnicastSerial { // The underlying serial port as returned by ZSerial (tokio-serial) diff --git a/io/zenoh-links/zenoh-link-tcp/src/lib.rs b/io/zenoh-links/zenoh-link-tcp/src/lib.rs index 0b075d9bf8..ebc2bba70b 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/lib.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/lib.rs @@ -17,12 +17,15 @@ //! This crate is intended for Zenoh's internal use. //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) -use async_trait::async_trait; use std::net::SocketAddr; + +use async_trait::async_trait; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; -use zenoh_protocol::core::{endpoint::Address, Locator}; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{ + core::{endpoint::Address, Locator}, + transport::BatchSize, +}; use zenoh_result::{zerror, ZResult}; mod unicast; diff --git a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs index 1a8a2302d1..79812c526e 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs @@ -11,28 +11,28 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{cell::UnsafeCell, convert::TryInto, fmt, net::SocketAddr, sync::Arc, time::Duration}; + use async_trait::async_trait; -use std::cell::UnsafeCell; -use std::convert::TryInto; -use std::fmt; -use std::net::SocketAddr; -use std::sync::Arc; -use std::time::Duration; -use tokio::io::{AsyncReadExt, AsyncWriteExt}; +use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + net::{TcpListener, TcpSocket, TcpStream}, +}; use tokio_util::sync::CancellationToken; use zenoh_link_commons::{ get_ip_interface_names, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, BIND_INTERFACE, }; -use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{ + core::{EndPoint, Locator}, + transport::BatchSize, +}; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; use super::{ get_tcp_addrs, TCP_ACCEPT_THROTTLE_TIME, TCP_DEFAULT_MTU, TCP_LINGER_TIMEOUT, TCP_LOCATOR_PREFIX, }; -use tokio::net::{TcpListener, TcpSocket, TcpStream}; pub struct LinkUnicastTcp { // The underlying socket as returned from the tokio library diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index 5cf686cdc5..1ced1a26b1 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -11,21 +11,14 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{ - utils::{get_tls_addr, get_tls_host, get_tls_server_name, TlsClientConfig, TlsServerConfig}, - TLS_ACCEPT_THROTTLE_TIME, TLS_DEFAULT_MTU, TLS_LINGER_TIMEOUT, TLS_LOCATOR_PREFIX, -}; +use std::{cell::UnsafeCell, convert::TryInto, fmt, net::SocketAddr, sync::Arc, time::Duration}; use async_trait::async_trait; -use std::cell::UnsafeCell; -use std::convert::TryInto; -use std::fmt; -use std::net::SocketAddr; -use std::sync::Arc; -use std::time::Duration; -use tokio::io::{AsyncReadExt, AsyncWriteExt}; -use tokio::net::{TcpListener, TcpStream}; -use tokio::sync::Mutex as AsyncMutex; +use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + net::{TcpListener, TcpStream}, + sync::Mutex as AsyncMutex, +}; use tokio_rustls::{TlsAcceptor, TlsConnector, TlsStream}; use tokio_util::sync::CancellationToken; use zenoh_core::zasynclock; @@ -33,10 +26,17 @@ use zenoh_link_commons::{ get_ip_interface_names, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, }; -use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{ + core::{EndPoint, Locator}, + transport::BatchSize, +}; use zenoh_result::{zerror, ZResult}; +use crate::{ + utils::{get_tls_addr, get_tls_host, get_tls_server_name, TlsClientConfig, TlsServerConfig}, + TLS_ACCEPT_THROTTLE_TIME, 
TLS_DEFAULT_MTU, TLS_LINGER_TIMEOUT, TLS_LOCATOR_PREFIX, +}; + pub struct LinkUnicastTls { // The underlying socket as returned from the async-rustls library // NOTE: TlsStream requires &mut for read and write operations. This means diff --git a/io/zenoh-links/zenoh-link-tls/src/utils.rs b/io/zenoh-links/zenoh-link-tls/src/utils.rs index d51a17c694..b646c6e80d 100644 --- a/io/zenoh-links/zenoh-link-tls/src/utils.rs +++ b/io/zenoh-links/zenoh-link-tls/src/utils.rs @@ -11,7 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::config::*; +use std::{ + convert::TryFrom, + fs::File, + io, + io::{BufReader, Cursor}, + net::SocketAddr, + sync::Arc, +}; + use rustls::{ pki_types::{CertificateDer, PrivateKeyDer, TrustAnchor}, server::WebPkiClientVerifier, @@ -20,20 +28,17 @@ use rustls::{ }; use rustls_pki_types::ServerName; use secrecy::ExposeSecret; -use std::fs::File; -use std::io; -use std::{convert::TryFrom, net::SocketAddr}; -use std::{ - io::{BufReader, Cursor}, - sync::Arc, -}; use webpki::anchor_from_trusted_cert; use zenoh_config::Config as ZenohConfig; use zenoh_link_commons::{tls::WebPkiVerifierAnyServerName, ConfigurationInspector}; -use zenoh_protocol::core::endpoint::{Address, Config}; -use zenoh_protocol::core::Parameters; +use zenoh_protocol::core::{ + endpoint::{Address, Config}, + Parameters, +}; use zenoh_result::{bail, zerror, ZError, ZResult}; +use crate::config::*; + #[derive(Default, Clone, Copy, Debug)] pub struct TlsConfigurator; @@ -450,8 +455,7 @@ fn load_trust_anchors(config: &Config<'_>) -> ZResult> { } pub fn base64_decode(data: &str) -> ZResult> { - use base64::engine::general_purpose; - use base64::Engine; + use base64::{engine::general_purpose, Engine}; Ok(general_purpose::STANDARD .decode(data) .map_err(|e| zerror!("Unable to perform base64 decoding: {e:?}"))?) 
diff --git a/io/zenoh-links/zenoh-link-udp/src/lib.rs b/io/zenoh-links/zenoh-link-udp/src/lib.rs index 86db845d8f..c89708fe5d 100644 --- a/io/zenoh-links/zenoh-link-udp/src/lib.rs +++ b/io/zenoh-links/zenoh-link-udp/src/lib.rs @@ -20,14 +20,17 @@ mod multicast; mod unicast; +use std::net::SocketAddr; + use async_trait::async_trait; pub use multicast::*; -use std::net::SocketAddr; pub use unicast::*; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; -use zenoh_protocol::core::{endpoint::Address, Locator}; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{ + core::{endpoint::Address, Locator}, + transport::BatchSize, +}; use zenoh_result::{zerror, ZResult}; // NOTE: In case of using UDP in high-throughput scenarios, it is recommended to set the diff --git a/io/zenoh-links/zenoh-link-udp/src/multicast.rs b/io/zenoh-links/zenoh-link-udp/src/multicast.rs index f4a23ced93..280f5eb203 100644 --- a/io/zenoh-links/zenoh-link-udp/src/multicast.rs +++ b/io/zenoh-links/zenoh-link-udp/src/multicast.rs @@ -11,19 +11,26 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{config::*, UDP_DEFAULT_MTU}; -use crate::{get_udp_addrs, socket_addr_to_udp_locator}; +use std::{ + borrow::Cow, + fmt, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + sync::Arc, +}; + use async_trait::async_trait; use socket2::{Domain, Protocol, Socket, Type}; -use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; -use std::sync::Arc; -use std::{borrow::Cow, fmt}; use tokio::net::UdpSocket; use zenoh_link_commons::{LinkManagerMulticastTrait, LinkMulticast, LinkMulticastTrait}; -use zenoh_protocol::core::{Config, EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{ + core::{Config, EndPoint, Locator}, + transport::BatchSize, +}; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; +use super::{config::*, UDP_DEFAULT_MTU}; +use crate::{get_udp_addrs, socket_addr_to_udp_locator}; + pub struct LinkMulticastUdp { // The unicast socket address of this link unicast_addr: SocketAddr, diff --git a/io/zenoh-links/zenoh-link-udp/src/unicast.rs b/io/zenoh-links/zenoh-link-udp/src/unicast.rs index 9526ca74dd..79f980ca96 100644 --- a/io/zenoh-links/zenoh-link-udp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-udp/src/unicast.rs @@ -11,29 +11,34 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{ - get_udp_addrs, socket_addr_to_udp_locator, UDP_ACCEPT_THROTTLE_TIME, UDP_DEFAULT_MTU, - UDP_MAX_MTU, +use std::{ + collections::HashMap, + fmt, + net::{Ipv4Addr, Ipv6Addr, SocketAddr}, + sync::{Arc, Mutex, Weak}, + time::Duration, }; + use async_trait::async_trait; -use std::collections::HashMap; -use std::fmt; -use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr}; -use std::sync::{Arc, Mutex, Weak}; -use std::time::Duration; -use tokio::net::UdpSocket; -use tokio::sync::Mutex as AsyncMutex; +use tokio::{net::UdpSocket, sync::Mutex as AsyncMutex}; use tokio_util::sync::CancellationToken; use zenoh_core::{zasynclock, zlock}; use zenoh_link_commons::{ get_ip_interface_names, ConstructibleLinkManagerUnicast, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, BIND_INTERFACE, }; -use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{ + core::{EndPoint, Locator}, + transport::BatchSize, +}; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; use zenoh_sync::Mvar; +use super::{ + get_udp_addrs, socket_addr_to_udp_locator, UDP_ACCEPT_THROTTLE_TIME, 
UDP_DEFAULT_MTU, + UDP_MAX_MTU, +}; + type LinkHashMap = Arc>>>; type LinkInput = (Vec, usize); type LinkLeftOver = (Vec, usize, usize); diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs index ea90630523..1b30ceb553 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs @@ -11,41 +11,43 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::config; +use std::{ + cell::UnsafeCell, + collections::HashMap, + fmt, + fs::{File, OpenOptions}, + io::{ErrorKind, Read, Write}, + os::unix::fs::OpenOptionsExt, + sync::Arc, +}; + #[cfg(not(target_os = "macos"))] use advisory_lock::{AdvisoryFileLock, FileLockMode}; use async_trait::async_trait; use filepath::FilePath; -use nix::libc; -use nix::unistd::unlink; +use nix::{libc, unistd::unlink}; use rand::Rng; -use std::cell::UnsafeCell; -use std::collections::HashMap; -use std::fmt; -use std::fs::{File, OpenOptions}; -use std::io::ErrorKind; -use std::io::{Read, Write}; -use std::os::unix::fs::OpenOptionsExt; -use std::sync::Arc; -use tokio::fs::remove_file; -use tokio::io::unix::AsyncFd; -use tokio::io::Interest; -use tokio::task::JoinHandle; +use tokio::{ + fs::remove_file, + io::{unix::AsyncFd, Interest}, + task::JoinHandle, +}; use tokio_util::sync::CancellationToken; -use zenoh_core::{zasyncread, zasyncwrite, ResolveFuture, Wait}; -use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; -use zenoh_runtime::ZRuntime; - use unix_named_pipe::{create, open_write}; - +use zenoh_core::{zasyncread, zasyncwrite, ResolveFuture, Wait}; use zenoh_link_commons::{ ConstructibleLinkManagerUnicast, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; +use zenoh_protocol::{ + core::{EndPoint, Locator}, + transport::BatchSize, +}; use zenoh_result::{bail, ZResult}; +use zenoh_runtime::ZRuntime; use super::FILE_ACCESS_MASK; +use crate::config; const LINUX_PIPE_MAX_MTU: BatchSize = BatchSize::MAX; const LINUX_PIPE_DEDICATE_TRIES: usize = 100; diff --git a/io/zenoh-links/zenoh-link-unixsock_stream/src/lib.rs b/io/zenoh-links/zenoh-link-unixsock_stream/src/lib.rs index ce067c1aa2..771782e62a 100644 --- a/io/zenoh-links/zenoh-link-unixsock_stream/src/lib.rs +++ b/io/zenoh-links/zenoh-link-unixsock_stream/src/lib.rs @@ -20,8 +20,10 @@ use async_trait::async_trait; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; -use zenoh_protocol::core::{endpoint::Address, Locator}; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{ + core::{endpoint::Address, Locator}, + transport::BatchSize, +}; use zenoh_result::ZResult; #[cfg(target_family = "unix")] mod unicast; diff --git a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs index 4ad1b68d88..cc7147c9e0 100644 --- a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs @@ -11,31 +11,34 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::UNIXSOCKSTREAM_ACCEPT_THROTTLE_TIME; +use std::{ + cell::UnsafeCell, collections::HashMap, fmt, fs::remove_file, os::unix::io::RawFd, + path::PathBuf, sync::Arc, time::Duration, +}; + use async_trait::async_trait; -use std::cell::UnsafeCell; -use std::collections::HashMap; -use std::fmt; -use std::fs::remove_file; -use std::os::unix::io::RawFd; -use std::path::PathBuf; -use std::sync::Arc; -use 
std::time::Duration; -use tokio::io::{AsyncReadExt, AsyncWriteExt}; -use tokio::net::{UnixListener, UnixStream}; -use tokio::sync::RwLock as AsyncRwLock; -use tokio::task::JoinHandle; +use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + net::{UnixListener, UnixStream}, + sync::RwLock as AsyncRwLock, + task::JoinHandle, +}; use tokio_util::sync::CancellationToken; use uuid::Uuid; use zenoh_core::{zasyncread, zasyncwrite}; use zenoh_link_commons::{ LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; -use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{ + core::{EndPoint, Locator}, + transport::BatchSize, +}; use zenoh_result::{zerror, ZResult}; -use super::{get_unix_path_as_string, UNIXSOCKSTREAM_DEFAULT_MTU, UNIXSOCKSTREAM_LOCATOR_PREFIX}; +use super::{ + get_unix_path_as_string, UNIXSOCKSTREAM_ACCEPT_THROTTLE_TIME, UNIXSOCKSTREAM_DEFAULT_MTU, + UNIXSOCKSTREAM_LOCATOR_PREFIX, +}; pub struct LinkUnicastUnixSocketStream { // The underlying socket as returned from the tokio library diff --git a/io/zenoh-links/zenoh-link-vsock/src/unicast.rs b/io/zenoh-links/zenoh-link-vsock/src/unicast.rs index 6616790a28..605f114173 100644 --- a/io/zenoh-links/zenoh-link-vsock/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-vsock/src/unicast.rs @@ -12,17 +12,20 @@ // ZettaScale Zenoh Team, // +use std::{cell::UnsafeCell, collections::HashMap, fmt, sync::Arc, time::Duration}; + use async_trait::async_trait; use libc::VMADDR_PORT_ANY; -use std::cell::UnsafeCell; -use std::collections::HashMap; -use std::fmt; -use std::sync::Arc; -use std::time::Duration; -use tokio::io::{AsyncReadExt, AsyncWriteExt}; -use tokio::sync::RwLock as AsyncRwLock; -use tokio::task::JoinHandle; +use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + sync::RwLock as AsyncRwLock, + task::JoinHandle, +}; use tokio_util::sync::CancellationToken; +use tokio_vsock::{ + VsockAddr, VsockListener, VsockStream, VMADDR_CID_ANY, VMADDR_CID_HOST, VMADDR_CID_HYPERVISOR, + VMADDR_CID_LOCAL, +}; use zenoh_core::{zasyncread, zasyncwrite}; use zenoh_link_commons::{ LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, @@ -34,10 +37,6 @@ use zenoh_protocol::{ use zenoh_result::{bail, zerror, ZResult}; use super::{VSOCK_ACCEPT_THROTTLE_TIME, VSOCK_DEFAULT_MTU, VSOCK_LOCATOR_PREFIX}; -use tokio_vsock::{ - VsockAddr, VsockListener, VsockStream, VMADDR_CID_ANY, VMADDR_CID_HOST, VMADDR_CID_HYPERVISOR, - VMADDR_CID_LOCAL, -}; pub const VSOCK_VMADDR_CID_ANY: &str = "VMADDR_CID_ANY"; pub const VSOCK_VMADDR_CID_HYPERVISOR: &str = "VMADDR_CID_HYPERVISOR"; diff --git a/io/zenoh-links/zenoh-link-ws/src/lib.rs b/io/zenoh-links/zenoh-link-ws/src/lib.rs index d165b480a9..6a97ed99b6 100644 --- a/io/zenoh-links/zenoh-link-ws/src/lib.rs +++ b/io/zenoh-links/zenoh-link-ws/src/lib.rs @@ -17,13 +17,16 @@ //! This crate is intended for Zenoh's internal use. //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) -use async_trait::async_trait; use std::net::SocketAddr; + +use async_trait::async_trait; use url::Url; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; -use zenoh_protocol::core::{endpoint::Address, Locator}; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{ + core::{endpoint::Address, Locator}, + transport::BatchSize, +}; use zenoh_result::{bail, ZResult}; mod unicast; pub use unicast::*; diff --git a/io/zenoh-links/zenoh-link-ws/src/unicast.rs b/io/zenoh-links/zenoh-link-ws/src/unicast.rs index 635f3b8808..b671bf67f2 100644 --- a/io/zenoh-links/zenoh-link-ws/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-ws/src/unicast.rs @@ -12,29 +12,34 @@ // ZettaScale Zenoh Team, // +use std::{ + collections::HashMap, + fmt, + net::{Ipv4Addr, Ipv6Addr, SocketAddr}, + sync::Arc, + time::Duration, +}; + use async_trait::async_trait; -use futures_util::stream::SplitSink; -use futures_util::stream::SplitStream; -use futures_util::SinkExt; -use futures_util::StreamExt; -use std::collections::HashMap; -use std::fmt; -use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr}; -use std::sync::Arc; -use std::time::Duration; -use tokio::net::{TcpListener, TcpStream}; -use tokio::sync::{Mutex as AsyncMutex, RwLock as AsyncRwLock}; -use tokio::task::JoinHandle; -use tokio_tungstenite::accept_async; -use tokio_tungstenite::tungstenite::Message; -use tokio_tungstenite::{MaybeTlsStream, WebSocketStream}; +use futures_util::{ + stream::{SplitSink, SplitStream}, + SinkExt, StreamExt, +}; +use tokio::{ + net::{TcpListener, TcpStream}, + sync::{Mutex as AsyncMutex, RwLock as AsyncRwLock}, + task::JoinHandle, +}; +use tokio_tungstenite::{accept_async, tungstenite::Message, MaybeTlsStream, WebSocketStream}; use tokio_util::sync::CancellationToken; use zenoh_core::{zasynclock, zasyncread, zasyncwrite}; use zenoh_link_commons::{ LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; -use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; +use zenoh_protocol::{ + core::{EndPoint, Locator}, + transport::BatchSize, +}; use zenoh_result::{bail, zerror, ZResult}; use super::{get_ws_addr, get_ws_url, TCP_ACCEPT_THROTTLE_TIME, WS_DEFAULT_MTU, WS_LOCATOR_PREFIX}; diff --git a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index 8048d9ff49..b91acdc7ff 100644 --- a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::num::NonZeroUsize; + use zenoh_buffers::{ buffer::Buffer, reader::{DidntRead, HasReader}, @@ -497,7 +498,6 @@ impl Decode<(TransportMessage, BatchSize)> for &mut RBatch { mod tests { use std::vec; - use super::*; use rand::Rng; use zenoh_buffers::ZBuf; use zenoh_core::zcondfeat; @@ -511,6 +511,8 @@ mod tests { zenoh::{PushBody, Put}, }; + use super::*; + #[test] fn rw_batch() { let mut rng = rand::thread_rng(); diff --git a/io/zenoh-transport/src/common/defragmentation.rs b/io/zenoh-transport/src/common/defragmentation.rs index 8fab075fe4..476fad632c 100644 --- a/io/zenoh-transport/src/common/defragmentation.rs +++ b/io/zenoh-transport/src/common/defragmentation.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::seq_num::SeqNum; use zenoh_buffers::{buffer::Buffer, reader::HasReader, ZBuf, ZSlice}; use zenoh_codec::{RCodec, Zenoh080Reliability}; use zenoh_protocol::{ @@ -21,6 +20,8 @@ use zenoh_protocol::{ }; use 
zenoh_result::{bail, ZResult}; +use super::seq_num::SeqNum; + #[derive(Debug)] pub(crate) struct DefragBuffer { reliability: Reliability, diff --git a/io/zenoh-transport/src/common/pipeline.rs b/io/zenoh-transport/src/common/pipeline.rs index fe4e567617..349f9ed560 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ -1,30 +1,13 @@ -use crate::common::batch::BatchConfig; - -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use super::{ - batch::{Encode, WBatch}, - priority::{TransportChannelTx, TransportPriorityTx}, +use std::{ + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, Mutex, MutexGuard, + }, + time::{Duration, Instant}, }; + use flume::{bounded, Receiver, Sender}; use ringbuffer_spsc::{RingBuffer, RingBufferReader, RingBufferWriter}; -use std::sync::{Arc, Mutex, MutexGuard}; -use std::time::Duration; -use std::{ - sync::atomic::{AtomicBool, Ordering}, - time::Instant, -}; use zenoh_buffers::{ reader::{HasReader, Reader}, writer::HasWriter, @@ -33,10 +16,9 @@ use zenoh_buffers::{ use zenoh_codec::{transport::batch::BatchError, WCodec, Zenoh080}; use zenoh_config::QueueSizeConf; use zenoh_core::zlock; -use zenoh_protocol::core::Reliability; -use zenoh_protocol::network::NetworkMessage; use zenoh_protocol::{ - core::Priority, + core::{Priority, Reliability}, + network::NetworkMessage, transport::{ fragment::FragmentHeader, frame::{self, FrameHeader}, @@ -44,6 +26,25 @@ use zenoh_protocol::{ }, }; +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use super::{ + batch::{Encode, WBatch}, + priority::{TransportChannelTx, TransportPriorityTx}, +}; +use crate::common::batch::BatchConfig; + // It's faster to work directly with nanoseconds. // Backoff will never last more the u32::MAX nanoseconds. 
type NanoSeconds = u32; @@ -719,7 +720,6 @@ impl TransmissionPipelineConsumer { #[cfg(test)] mod tests { - use super::*; use std::{ convert::TryFrom, sync::{ @@ -728,8 +728,8 @@ mod tests { }, time::{Duration, Instant}, }; - use tokio::task; - use tokio::time::timeout; + + use tokio::{task, time::timeout}; use zenoh_buffers::{ reader::{DidntRead, HasReader}, ZBuf, @@ -743,6 +743,8 @@ mod tests { }; use zenoh_result::ZResult; + use super::*; + const SLEEP: Duration = Duration::from_millis(100); const TIMEOUT: Duration = Duration::from_secs(60); diff --git a/io/zenoh-transport/src/common/priority.rs b/io/zenoh-transport/src/common/priority.rs index 8644cdacb7..fb5c520e3d 100644 --- a/io/zenoh-transport/src/common/priority.rs +++ b/io/zenoh-transport/src/common/priority.rs @@ -11,9 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::defragmentation::DefragBuffer; -use super::seq_num::{SeqNum, SeqNumGenerator}; use std::sync::{Arc, Mutex}; + use zenoh_core::zlock; use zenoh_protocol::{ core::{Bits, Reliability}, @@ -21,6 +20,11 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; +use super::{ + defragmentation::DefragBuffer, + seq_num::{SeqNum, SeqNumGenerator}, +}; + #[derive(Debug)] pub(crate) struct TransportChannelTx { pub(crate) sn: SeqNumGenerator, diff --git a/io/zenoh-transport/src/common/stats.rs b/io/zenoh-transport/src/common/stats.rs index aaf39641c0..da6e57d518 100644 --- a/io/zenoh-transport/src/common/stats.rs +++ b/io/zenoh-transport/src/common/stats.rs @@ -167,8 +167,9 @@ macro_rules! stats_struct { } } -use serde::{Deserialize, Serialize}; use std::sync::atomic::{AtomicUsize, Ordering}; + +use serde::{Deserialize, Serialize}; stats_struct! { #[derive(Clone, Debug, Deserialize, Serialize)] pub struct DiscriminatedStats { diff --git a/io/zenoh-transport/src/lib.rs b/io/zenoh-transport/src/lib.rs index 5e00bed2e7..f4c135c9d6 100644 --- a/io/zenoh-transport/src/lib.rs +++ b/io/zenoh-transport/src/lib.rs @@ -28,16 +28,19 @@ pub use common::stats; #[cfg(feature = "shared-memory")] mod shm; -use crate::{multicast::TransportMulticast, unicast::TransportUnicast}; +use std::{any::Any, sync::Arc}; + pub use manager::*; use serde::Serialize; -use std::any::Any; -use std::sync::Arc; use zenoh_link::Link; -use zenoh_protocol::core::{WhatAmI, ZenohId}; -use zenoh_protocol::network::NetworkMessage; +use zenoh_protocol::{ + core::{WhatAmI, ZenohId}, + network::NetworkMessage, +}; use zenoh_result::ZResult; +use crate::{multicast::TransportMulticast, unicast::TransportUnicast}; + /*************************************/ /* TRANSPORT */ /*************************************/ diff --git a/io/zenoh-transport/src/manager.rs b/io/zenoh-transport/src/manager.rs index ddf1fe23c1..3f57b3ceae 100644 --- a/io/zenoh-transport/src/manager.rs +++ b/io/zenoh-transport/src/manager.rs @@ -11,18 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::unicast::manager::{ - TransportManagerBuilderUnicast, TransportManagerConfigUnicast, TransportManagerStateUnicast, -}; -use super::TransportEventHandler; -use crate::multicast::manager::{ - TransportManagerBuilderMulticast, TransportManagerConfigMulticast, - TransportManagerStateMulticast, -}; +use std::{collections::HashMap, sync::Arc, time::Duration}; + use rand::{RngCore, SeedableRng}; -use std::collections::HashMap; -use std::sync::Arc; -use std::time::Duration; use tokio::sync::Mutex as AsyncMutex; use zenoh_config::{Config, LinkRxConf, QueueConf, QueueSizeConf}; use zenoh_crypto::{BlockCipher, PseudoRng}; @@ -39,6 +30,17 @@ use 
zenoh_shm::api::client_storage::GLOBAL_CLIENT_STORAGE; use zenoh_shm::reader::SharedMemoryReader; use zenoh_task::TaskController; +use super::{ + unicast::manager::{ + TransportManagerBuilderUnicast, TransportManagerConfigUnicast, TransportManagerStateUnicast, + }, + TransportEventHandler, +}; +use crate::multicast::manager::{ + TransportManagerBuilderMulticast, TransportManagerConfigMulticast, + TransportManagerStateMulticast, +}; + /// # Examples /// ``` /// use std::sync::Arc; diff --git a/io/zenoh-transport/src/multicast/establishment.rs b/io/zenoh-transport/src/multicast/establishment.rs index a0b7576f03..0c24626697 100644 --- a/io/zenoh-transport/src/multicast/establishment.rs +++ b/io/zenoh-transport/src/multicast/establishment.rs @@ -11,6 +11,17 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::sync::Arc; + +use rand::Rng; +use zenoh_core::zasynclock; +use zenoh_link::LinkMulticast; +use zenoh_protocol::{ + core::{Field, Priority}, + transport::PrioritySn, +}; +use zenoh_result::{bail, ZResult}; + use crate::{ common::{batch::BatchConfig, seq_num}, multicast::{ @@ -20,15 +31,6 @@ use crate::{ }, TransportManager, }; -use rand::Rng; -use std::sync::Arc; -use zenoh_core::zasynclock; -use zenoh_link::LinkMulticast; -use zenoh_protocol::{ - core::{Field, Priority}, - transport::PrioritySn, -}; -use zenoh_result::{bail, ZResult}; pub(crate) async fn open_link( manager: &TransportManager, diff --git a/io/zenoh-transport/src/multicast/link.rs b/io/zenoh-transport/src/multicast/link.rs index 883f978684..a1c9c2bae8 100644 --- a/io/zenoh-transport/src/multicast/link.rs +++ b/io/zenoh-transport/src/multicast/link.rs @@ -11,25 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(feature = "stats")] -use crate::stats::TransportStats; -use crate::{ - common::{ - batch::{BatchConfig, Encode, Finalize, RBatch, WBatch}, - pipeline::{ - TransmissionPipeline, TransmissionPipelineConf, TransmissionPipelineConsumer, - TransmissionPipelineProducer, - }, - priority::TransportPriorityTx, - }, - multicast::transport::TransportMulticastInner, -}; use std::{ convert::TryInto, fmt, sync::Arc, time::{Duration, Instant}, }; + use tokio::task::JoinHandle; use zenoh_buffers::{BBuf, ZSlice, ZSliceBuffer}; use zenoh_core::{zcondfeat, zlock}; @@ -41,6 +29,20 @@ use zenoh_protocol::{ use zenoh_result::{zerror, ZResult}; use zenoh_sync::{RecyclingObject, RecyclingObjectPool, Signal}; +#[cfg(feature = "stats")] +use crate::stats::TransportStats; +use crate::{ + common::{ + batch::{BatchConfig, Encode, Finalize, RBatch, WBatch}, + pipeline::{ + TransmissionPipeline, TransmissionPipelineConf, TransmissionPipelineConsumer, + TransmissionPipelineProducer, + }, + priority::TransportPriorityTx, + }, + multicast::transport::TransportMulticastInner, +}; + /****************************/ /* TRANSPORT MULTICAST LINK */ /****************************/ diff --git a/io/zenoh-transport/src/multicast/manager.rs b/io/zenoh-transport/src/multicast/manager.rs index ebc51a2ec6..3c04cf6425 100644 --- a/io/zenoh-transport/src/multicast/manager.rs +++ b/io/zenoh-transport/src/multicast/manager.rs @@ -11,11 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::multicast::{transport::TransportMulticastInner, TransportMulticast}; -use crate::TransportManager; -use std::collections::HashMap; -use std::sync::Arc; -use std::time::Duration; +use std::{collections::HashMap, sync::Arc, time::Duration}; + use tokio::sync::Mutex; #[cfg(feature = "transport_compression")] use zenoh_config::CompressionMulticastConf; @@ 
-30,6 +27,11 @@ use zenoh_protocol::{ }; use zenoh_result::{bail, zerror, ZResult}; +use crate::{ + multicast::{transport::TransportMulticastInner, TransportMulticast}, + TransportManager, +}; + pub struct TransportManagerConfigMulticast { pub lease: Duration, pub keep_alive: usize, diff --git a/io/zenoh-transport/src/multicast/mod.rs b/io/zenoh-transport/src/multicast/mod.rs index e205125b39..78d76bb6c8 100644 --- a/io/zenoh-transport/src/multicast/mod.rs +++ b/io/zenoh-transport/src/multicast/mod.rs @@ -17,18 +17,15 @@ pub(crate) mod rx; pub(crate) mod transport; pub(crate) mod tx; -use super::common; -use crate::{ - multicast::link::TransportLinkMulticast, TransportMulticastEventHandler, TransportPeer, +use std::{ + fmt::{self, Write}, + sync::{Arc, Weak}, }; + pub use manager::{ TransportManagerBuilderMulticast, TransportManagerConfigMulticast, TransportManagerParamsMulticast, }; -use std::{ - fmt::{self, Write}, - sync::{Arc, Weak}, -}; use transport::TransportMulticastInner; use zenoh_core::{zcondfeat, zread}; use zenoh_link::Link; @@ -39,6 +36,11 @@ use zenoh_protocol::{ }; use zenoh_result::{zerror, ZResult}; +use super::common; +use crate::{ + multicast::link::TransportLinkMulticast, TransportMulticastEventHandler, TransportPeer, +}; + /*************************************/ /* TRANSPORT MULTICAST */ /*************************************/ diff --git a/io/zenoh-transport/src/multicast/rx.rs b/io/zenoh-transport/src/multicast/rx.rs index 1576d65cd6..ee8e024bb6 100644 --- a/io/zenoh-transport/src/multicast/rx.rs +++ b/io/zenoh-transport/src/multicast/rx.rs @@ -11,12 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::transport::{TransportMulticastInner, TransportMulticastPeer}; -use crate::common::{ - batch::{Decode, RBatch}, - priority::TransportChannelRx, -}; use std::sync::MutexGuard; + use zenoh_core::{zlock, zread}; use zenoh_protocol::{ core::{Locator, Priority, Reliability}, @@ -28,6 +24,12 @@ use zenoh_protocol::{ }; use zenoh_result::{bail, zerror, ZResult}; +use super::transport::{TransportMulticastInner, TransportMulticastPeer}; +use crate::common::{ + batch::{Decode, RBatch}, + priority::TransportChannelRx, +}; + /*************************************/ /* TRANSPORT RX */ /*************************************/ diff --git a/io/zenoh-transport/src/multicast/transport.rs b/io/zenoh-transport/src/multicast/transport.rs index babf68ce61..a60ed180ee 100644 --- a/io/zenoh-transport/src/multicast/transport.rs +++ b/io/zenoh-transport/src/multicast/transport.rs @@ -11,18 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::common::priority::{TransportPriorityRx, TransportPriorityTx}; -use super::link::{TransportLinkMulticastConfigUniversal, TransportLinkMulticastUniversal}; -#[cfg(feature = "shared-memory")] -use crate::shm::MulticastTransportShmConfig; -#[cfg(feature = "stats")] -use crate::stats::TransportStats; -use crate::{ - multicast::{ - link::TransportLinkMulticast, TransportConfigMulticast, TransportMulticastEventHandler, - }, - TransportManager, TransportPeer, TransportPeerEventHandler, -}; use std::{ collections::HashMap, sync::{ @@ -31,17 +19,31 @@ use std::{ }, time::Duration, }; + use tokio_util::sync::CancellationToken; use zenoh_core::{zcondfeat, zread, zwrite}; use zenoh_link::{Link, Locator}; -use zenoh_protocol::core::Resolution; -use zenoh_protocol::transport::{batch_size, Close, TransportMessage}; use zenoh_protocol::{ - core::{Bits, Field, Priority, WhatAmI, ZenohId}, - transport::{close, Join}, + core::{Bits, Field, Priority, 
Resolution, WhatAmI, ZenohId}, + transport::{batch_size, close, Close, Join, TransportMessage}, }; use zenoh_result::{bail, ZResult}; use zenoh_task::TaskController; + +use super::{ + common::priority::{TransportPriorityRx, TransportPriorityTx}, + link::{TransportLinkMulticastConfigUniversal, TransportLinkMulticastUniversal}, +}; +#[cfg(feature = "shared-memory")] +use crate::shm::MulticastTransportShmConfig; +#[cfg(feature = "stats")] +use crate::stats::TransportStats; +use crate::{ + multicast::{ + link::TransportLinkMulticast, TransportConfigMulticast, TransportMulticastEventHandler, + }, + TransportManager, TransportPeer, TransportPeerEventHandler, +}; // use zenoh_util::{Timed, TimedEvent, TimedHandle, Timer}; /*************************************/ diff --git a/io/zenoh-transport/src/multicast/tx.rs b/io/zenoh-transport/src/multicast/tx.rs index ee7715d38b..775131703a 100644 --- a/io/zenoh-transport/src/multicast/tx.rs +++ b/io/zenoh-transport/src/multicast/tx.rs @@ -11,10 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::transport::TransportMulticastInner; use zenoh_core::zread; use zenoh_protocol::network::NetworkMessage; +use super::transport::TransportMulticastInner; #[cfg(feature = "shared-memory")] use crate::shm::map_zmsg_to_partner; diff --git a/io/zenoh-transport/src/shm.rs b/io/zenoh-transport/src/shm.rs index 6dd65aab16..7a50a68742 100644 --- a/io/zenoh-transport/src/shm.rs +++ b/io/zenoh-transport/src/shm.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::collections::HashSet; + use zenoh_buffers::{reader::HasReader, writer::HasWriter, ZBuf, ZSlice, ZSliceKind}; use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_core::zerror; diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index 48638834e0..d074ea9642 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs +++ b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -11,26 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(feature = "shared-memory")] -use super::ext::shm::AuthSegment; -#[cfg(feature = "shared-memory")] -use crate::shm::TransportShmConfig; +use std::time::Duration; -use crate::{ - common::batch::BatchConfig, - unicast::{ - establishment::{compute_sn, ext, AcceptFsm, Cookie, Zenoh080Cookie}, - link::{ - LinkUnicastWithOpenAck, TransportLinkUnicast, TransportLinkUnicastConfig, - TransportLinkUnicastDirection, - }, - TransportConfigUnicast, - }, - TransportManager, -}; use async_trait::async_trait; use rand::Rng; -use std::time::Duration; use tokio::sync::Mutex; use zenoh_buffers::{reader::HasReader, writer::HasWriter, ZSlice}; use zenoh_codec::{RCodec, WCodec, Zenoh080}; @@ -47,6 +31,23 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; +#[cfg(feature = "shared-memory")] +use super::ext::shm::AuthSegment; +#[cfg(feature = "shared-memory")] +use crate::shm::TransportShmConfig; +use crate::{ + common::batch::BatchConfig, + unicast::{ + establishment::{compute_sn, ext, AcceptFsm, Cookie, Zenoh080Cookie}, + link::{ + LinkUnicastWithOpenAck, TransportLinkUnicast, TransportLinkUnicastConfig, + TransportLinkUnicastDirection, + }, + TransportConfigUnicast, + }, + TransportManager, +}; + pub(super) type AcceptError = (zenoh_result::Error, Option); struct StateTransport { diff --git a/io/zenoh-transport/src/unicast/establishment/cookie.rs b/io/zenoh-transport/src/unicast/establishment/cookie.rs index 6f0295601c..fccce5e672 100644 --- 
a/io/zenoh-transport/src/unicast/establishment/cookie.rs +++ b/io/zenoh-transport/src/unicast/establishment/cookie.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::unicast::establishment::ext; use std::convert::TryFrom; + use zenoh_buffers::{ reader::{DidntRead, HasReader, Reader}, writer::{DidntWrite, HasWriter, Writer}, @@ -24,6 +24,8 @@ use zenoh_protocol::{ transport::BatchSize, }; +use crate::unicast::establishment::ext; + #[derive(Debug, PartialEq)] pub(crate) struct Cookie { pub(crate) zid: ZenohId, @@ -193,10 +195,11 @@ impl Cookie { mod tests { #[test] fn codec_cookie() { - use super::*; use rand::{Rng, SeedableRng}; use zenoh_buffers::ZBuf; + use super::*; + const NUM_ITER: usize = 1_000; macro_rules! run_single { diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs index beab85d18a..8d57434bc3 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs @@ -16,21 +16,19 @@ pub(crate) mod pubkey; #[cfg(feature = "auth_usrpwd")] pub(crate) mod usrpwd; -use crate::unicast::establishment::{AcceptFsm, OpenFsm}; +use std::{convert::TryInto, marker::PhantomData}; + use async_trait::async_trait; #[cfg(feature = "auth_pubkey")] pub use pubkey::*; use rand::{CryptoRng, Rng}; -use std::convert::TryInto; -use std::marker::PhantomData; use tokio::sync::{Mutex, RwLock}; #[cfg(feature = "auth_usrpwd")] pub use usrpwd::*; -use zenoh_buffers::reader::SiphonableReader; -use zenoh_buffers::ZBuf; use zenoh_buffers::{ - reader::{DidntRead, HasReader, Reader}, + reader::{DidntRead, HasReader, Reader, SiphonableReader}, writer::{DidntWrite, HasWriter, Writer}, + ZBuf, }; use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_config::Config; @@ -41,6 +39,8 @@ use zenoh_protocol::{ transport::{init, open}, }; +use crate::unicast::establishment::{AcceptFsm, OpenFsm}; + pub(crate) mod id { #[cfg(feature = "auth_pubkey")] pub(crate) const PUBKEY: u8 = 0x1; diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs index 9a7c3d8f32..69b4707bf0 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs @@ -11,7 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::unicast::establishment::{ext::auth::id, AcceptFsm, OpenFsm}; +use std::{collections::HashSet, fmt, ops::Deref, path::Path}; + use async_trait::async_trait; use rand::Rng; use rsa::{ @@ -19,7 +20,6 @@ use rsa::{ traits::PublicKeyParts, BigUint, Pkcs1v15Encrypt, RsaPrivateKey, RsaPublicKey, }; -use std::{collections::HashSet, fmt, ops::Deref, path::Path}; use tokio::sync::{Mutex, RwLock}; use zenoh_buffers::{ reader::{DidntRead, HasReader, Reader}, @@ -31,10 +31,13 @@ use zenoh_core::{bail, zasynclock, zasyncread, zerror, Error as ZError, Result a use zenoh_crypto::PseudoRng; use zenoh_protocol::common::{ZExtUnit, ZExtZBuf}; +use crate::unicast::establishment::{ext::auth::id, AcceptFsm, OpenFsm}; + mod ext { - use super::{id::PUBKEY, ZExtUnit, ZExtZBuf}; use zenoh_protocol::{zextunit, zextzbuf}; + use super::{id::PUBKEY, ZExtUnit, ZExtZBuf}; + pub(super) type InitSyn = zextzbuf!(PUBKEY, false); pub(super) type InitAck = zextzbuf!(PUBKEY, false); pub(super) type OpenSyn = zextzbuf!(PUBKEY, false); diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs 
b/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs index 23560e307e..be24337fad 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs @@ -11,10 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::unicast::establishment::{ext::auth::id, AcceptFsm, OpenFsm}; +use std::{collections::HashMap, fmt}; + use async_trait::async_trait; use rand::{CryptoRng, Rng}; -use std::{collections::HashMap, fmt}; use tokio::sync::RwLock; use zenoh_buffers::{ reader::{DidntRead, HasReader, Reader}, @@ -26,10 +26,13 @@ use zenoh_core::{bail, zasyncread, zerror, Error as ZError, Result as ZResult}; use zenoh_crypto::hmac; use zenoh_protocol::common::{ZExtUnit, ZExtZ64, ZExtZBuf}; +use crate::unicast::establishment::{ext::auth::id, AcceptFsm, OpenFsm}; + mod ext { - use super::{id::USRPWD, ZExtUnit, ZExtZ64, ZExtZBuf}; use zenoh_protocol::{zextunit, zextz64, zextzbuf}; + use super::{id::USRPWD, ZExtUnit, ZExtZ64, ZExtZBuf}; + pub(super) type InitSyn = zextunit!(USRPWD, false); pub(super) type InitAck = zextz64!(USRPWD, false); pub(super) type OpenSyn = zextzbuf!(USRPWD, false); @@ -451,10 +454,12 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn authenticator_usrpwd_config() { async fn inner() { - use super::AuthUsrPwd; use std::{fs::File, io::Write}; + use zenoh_config::UsrPwdConf; + use super::AuthUsrPwd; + /* [CONFIG] */ let f1 = "zenoh-test-auth-usrpwd.txt"; diff --git a/io/zenoh-transport/src/unicast/establishment/ext/compression.rs b/io/zenoh-transport/src/unicast/establishment/ext/compression.rs index 2b57eb85db..1d4e995af6 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/compression.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/compression.rs @@ -11,9 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::unicast::establishment::{AcceptFsm, OpenFsm}; -use async_trait::async_trait; use core::marker::PhantomData; + +use async_trait::async_trait; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -22,6 +22,8 @@ use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_protocol::transport::{init, open}; use zenoh_result::Error as ZError; +use crate::unicast::establishment::{AcceptFsm, OpenFsm}; + // Extension Fsm pub(crate) struct CompressionFsm<'a> { _a: PhantomData<&'a ()>, diff --git a/io/zenoh-transport/src/unicast/establishment/ext/lowlatency.rs b/io/zenoh-transport/src/unicast/establishment/ext/lowlatency.rs index 9dda9175b1..ff1efc90b9 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/lowlatency.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/lowlatency.rs @@ -11,9 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::unicast::establishment::{AcceptFsm, OpenFsm}; -use async_trait::async_trait; use core::marker::PhantomData; + +use async_trait::async_trait; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -22,6 +22,8 @@ use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_protocol::transport::{init, open}; use zenoh_result::Error as ZError; +use crate::unicast::establishment::{AcceptFsm, OpenFsm}; + // Extension Fsm pub(crate) struct LowLatencyFsm<'a> { _a: PhantomData<&'a ()>, diff --git a/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs b/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs index f8e74779cf..8980766888 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs 
+++ b/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs @@ -11,10 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::unicast::establishment::{ - ext::auth::pubkey::{self, AuthPubKey, AuthPubKeyFsm, ZPublicKey}, - AcceptFsm, OpenFsm, -}; use async_trait::async_trait; use rand::{CryptoRng, Rng}; use rsa::{BigUint, RsaPrivateKey, RsaPublicKey}; @@ -28,6 +24,11 @@ use zenoh_core::{zerror, Error as ZError, Result as ZResult}; use zenoh_crypto::PseudoRng; use zenoh_protocol::transport::{init, open}; +use crate::unicast::establishment::{ + ext::auth::pubkey::{self, AuthPubKey, AuthPubKeyFsm, ZPublicKey}, + AcceptFsm, OpenFsm, +}; + const KEY_SIZE: usize = 512; // Extension Fsm diff --git a/io/zenoh-transport/src/unicast/establishment/ext/qos.rs b/io/zenoh-transport/src/unicast/establishment/ext/qos.rs index 4626ec5998..f749073805 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/qos.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/qos.rs @@ -11,9 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::unicast::establishment::{AcceptFsm, OpenFsm}; -use async_trait::async_trait; use core::marker::PhantomData; + +use async_trait::async_trait; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -22,6 +22,8 @@ use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_protocol::transport::{init, open}; use zenoh_result::Error as ZError; +use crate::unicast::establishment::{AcceptFsm, OpenFsm}; + // Extension Fsm pub(crate) struct QoSFsm<'a> { _a: PhantomData<&'a ()>, diff --git a/io/zenoh-transport/src/unicast/establishment/ext/shm.rs b/io/zenoh-transport/src/unicast/establishment/ext/shm.rs index 1287095a51..bc96d2e34a 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/shm.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/shm.rs @@ -11,10 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::unicast::establishment::{AcceptFsm, OpenFsm}; +use std::ops::Deref; + use async_trait::async_trait; use rand::{Rng, SeedableRng}; -use std::ops::Deref; use zenoh_buffers::{ reader::{DidntRead, HasReader, Reader}, writer::{DidntWrite, HasWriter, Writer}, @@ -26,6 +26,8 @@ use zenoh_protocol::transport::{init, open}; use zenoh_result::{zerror, Error as ZError, ZResult}; use zenoh_shm::{api::common::types::ProtocolID, posix_shm::array::ArrayInSHM}; +use crate::unicast::establishment::{AcceptFsm, OpenFsm}; + /*************************************/ /* Segment */ /*************************************/ diff --git a/io/zenoh-transport/src/unicast/establishment/mod.rs b/io/zenoh-transport/src/unicast/establishment/mod.rs index f79aa826d0..79627f4c49 100644 --- a/io/zenoh-transport/src/unicast/establishment/mod.rs +++ b/io/zenoh-transport/src/unicast/establishment/mod.rs @@ -16,7 +16,6 @@ pub(super) mod cookie; pub mod ext; pub(crate) mod open; -use crate::common::seq_num; use async_trait::async_trait; use cookie::*; use sha3::{ @@ -28,6 +27,8 @@ use zenoh_protocol::{ transport::TransportSn, }; +use crate::common::seq_num; + /*************************************/ /* TRAITS */ /*************************************/ diff --git a/io/zenoh-transport/src/unicast/establishment/open.rs b/io/zenoh-transport/src/unicast/establishment/open.rs index 40aa959d10..49c57d9e9a 100644 --- a/io/zenoh-transport/src/unicast/establishment/open.rs +++ b/io/zenoh-transport/src/unicast/establishment/open.rs @@ -11,6 +11,23 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::time::Duration; + +use 
async_trait::async_trait; +use zenoh_buffers::ZSlice; +#[cfg(feature = "transport_auth")] +use zenoh_core::zasynclock; +use zenoh_core::{zcondfeat, zerror}; +use zenoh_link::LinkUnicast; +use zenoh_protocol::{ + core::{Field, Resolution, WhatAmI, ZenohId}, + transport::{ + batch_size, close, BatchSize, Close, InitSyn, OpenSyn, TransportBody, TransportMessage, + TransportSn, + }, +}; +use zenoh_result::ZResult; + #[cfg(feature = "shared-memory")] use super::ext::shm::AuthSegment; #[cfg(feature = "shared-memory")] @@ -27,21 +44,6 @@ use crate::{ }, TransportManager, }; -use async_trait::async_trait; -use std::time::Duration; -use zenoh_buffers::ZSlice; -#[cfg(feature = "transport_auth")] -use zenoh_core::zasynclock; -use zenoh_core::{zcondfeat, zerror}; -use zenoh_link::LinkUnicast; -use zenoh_protocol::{ - core::{Field, Resolution, WhatAmI, ZenohId}, - transport::{ - batch_size, close, BatchSize, Close, InitSyn, OpenSyn, TransportBody, TransportMessage, - TransportSn, - }, -}; -use zenoh_result::ZResult; type OpenError = (zenoh_result::Error, Option); diff --git a/io/zenoh-transport/src/unicast/link.rs b/io/zenoh-transport/src/unicast/link.rs index 1c9c190aae..b76bc764ef 100644 --- a/io/zenoh-transport/src/unicast/link.rs +++ b/io/zenoh-transport/src/unicast/link.rs @@ -11,15 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::common::batch::{BatchConfig, Decode, Encode, Finalize, RBatch, WBatch}; -use std::fmt; -use std::sync::Arc; +use std::{fmt, sync::Arc}; + use zenoh_buffers::{BBuf, ZSlice, ZSliceBuffer}; use zenoh_core::zcondfeat; use zenoh_link::{Link, LinkUnicast}; use zenoh_protocol::transport::{BatchSize, Close, OpenAck, TransportMessage}; use zenoh_result::{zerror, ZResult}; +use crate::common::batch::{BatchConfig, Decode, Encode, Finalize, RBatch, WBatch}; + #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub(crate) enum TransportLinkUnicastDirection { Inbound, diff --git a/io/zenoh-transport/src/unicast/lowlatency/link.rs b/io/zenoh-transport/src/unicast/lowlatency/link.rs index 7958631a8b..6dcd2fde44 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/link.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/link.rs @@ -11,24 +11,23 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::transport::TransportUnicastLowlatency; -#[cfg(feature = "stats")] -use crate::stats::TransportStats; -use crate::unicast::link::TransportLinkUnicast; -use crate::unicast::link::TransportLinkUnicastRx; -use std::sync::Arc; -use std::time::Duration; +use std::{sync::Arc, time::Duration}; + use tokio::sync::RwLock; use tokio_util::sync::CancellationToken; use zenoh_buffers::{writer::HasWriter, ZSlice}; use zenoh_codec::*; use zenoh_core::{zasyncread, zasyncwrite}; use zenoh_link::LinkUnicast; -use zenoh_protocol::transport::TransportMessageLowLatency; -use zenoh_protocol::transport::{KeepAlive, TransportBodyLowLatency}; +use zenoh_protocol::transport::{KeepAlive, TransportBodyLowLatency, TransportMessageLowLatency}; use zenoh_result::{zerror, ZResult}; use zenoh_runtime::ZRuntime; +use super::transport::TransportUnicastLowlatency; +#[cfg(feature = "stats")] +use crate::stats::TransportStats; +use crate::unicast::link::{TransportLinkUnicast, TransportLinkUnicastRx}; + pub(crate) async fn send_with_link( link: &LinkUnicast, msg: TransportMessageLowLatency, diff --git a/io/zenoh-transport/src/unicast/lowlatency/rx.rs b/io/zenoh-transport/src/unicast/lowlatency/rx.rs index de0b62354f..c82e172c7b 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/rx.rs +++ 
b/io/zenoh-transport/src/unicast/lowlatency/rx.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::transport::TransportUnicastLowlatency; use zenoh_buffers::{ reader::{HasReader, Reader}, ZSlice, @@ -22,6 +21,8 @@ use zenoh_link::LinkUnicast; use zenoh_protocol::{network::NetworkMessage, transport::TransportMessageLowLatency}; use zenoh_result::{zerror, ZResult}; +use super::transport::TransportUnicastLowlatency; + /*************************************/ /* TRANSPORT RX */ /*************************************/ diff --git a/io/zenoh-transport/src/unicast/lowlatency/transport.rs b/io/zenoh-transport/src/unicast/lowlatency/transport.rs index 726d21bb84..9f122e9c72 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/transport.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/transport.rs @@ -11,6 +11,23 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{ + sync::{Arc, RwLock as SyncRwLock}, + time::Duration, +}; + +use async_trait::async_trait; +use tokio::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard, RwLock}; +use tokio_util::{sync::CancellationToken, task::TaskTracker}; +use zenoh_core::{zasynclock, zasyncread, zasyncwrite, zread, zwrite}; +use zenoh_link::Link; +use zenoh_protocol::{ + core::{WhatAmI, ZenohId}, + network::NetworkMessage, + transport::{close, Close, TransportBodyLowLatency, TransportMessageLowLatency, TransportSn}, +}; +use zenoh_result::{zerror, ZResult}; + #[cfg(feature = "stats")] use crate::stats::TransportStats; use crate::{ @@ -21,23 +38,6 @@ use crate::{ }, TransportManager, TransportPeerEventHandler, }; -use async_trait::async_trait; -use std::sync::{Arc, RwLock as SyncRwLock}; -use std::time::Duration; -use tokio::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard, RwLock}; -use tokio_util::sync::CancellationToken; -use tokio_util::task::TaskTracker; -use zenoh_core::{zasynclock, zasyncread, zasyncwrite, zread, zwrite}; -use zenoh_link::Link; -use zenoh_protocol::network::NetworkMessage; -use zenoh_protocol::transport::TransportBodyLowLatency; -use zenoh_protocol::transport::TransportMessageLowLatency; -use zenoh_protocol::transport::{Close, TransportSn}; -use zenoh_protocol::{ - core::{WhatAmI, ZenohId}, - transport::close, -}; -use zenoh_result::{zerror, ZResult}; /*************************************/ /* LOW-LATENCY TRANSPORT */ diff --git a/io/zenoh-transport/src/unicast/lowlatency/tx.rs b/io/zenoh-transport/src/unicast/lowlatency/tx.rs index d573544340..90304a196d 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/tx.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/tx.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::transport::TransportUnicastLowlatency; use zenoh_protocol::{ network::NetworkMessage, transport::{TransportBodyLowLatency, TransportMessageLowLatency}, @@ -20,6 +19,7 @@ use zenoh_protocol::{ use zenoh_result::bail; use zenoh_result::ZResult; +use super::transport::TransportUnicastLowlatency; #[cfg(feature = "shared-memory")] use crate::shm::map_zmsg_to_partner; diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index 6844f30163..b92462276a 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -11,22 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(feature = "shared-memory")] -use super::establishment::ext::shm::AuthUnicast; -use super::{link::LinkUnicastWithOpenAck, transport_unicast_inner::InitTransportResult}; -#[cfg(feature = 
"transport_auth")] -use crate::unicast::establishment::ext::auth::Auth; -#[cfg(feature = "transport_multilink")] -use crate::unicast::establishment::ext::multilink::MultiLink; -use crate::{ - unicast::{ - lowlatency::transport::TransportUnicastLowlatency, - transport_unicast_inner::{InitTransportError, TransportUnicastTrait}, - universal::transport::TransportUnicastUniversal, - TransportConfigUnicast, TransportUnicast, - }, - TransportManager, TransportPeer, -}; use std::{ collections::HashMap, sync::{ @@ -35,6 +19,7 @@ use std::{ }, time::Duration, }; + use tokio::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; #[cfg(feature = "transport_compression")] use zenoh_config::CompressionUnicastConf; @@ -52,6 +37,23 @@ use zenoh_result::{bail, zerror, ZResult}; #[cfg(feature = "shared-memory")] use zenoh_shm::reader::SharedMemoryReader; +#[cfg(feature = "shared-memory")] +use super::establishment::ext::shm::AuthUnicast; +use super::{link::LinkUnicastWithOpenAck, transport_unicast_inner::InitTransportResult}; +#[cfg(feature = "transport_auth")] +use crate::unicast::establishment::ext::auth::Auth; +#[cfg(feature = "transport_multilink")] +use crate::unicast::establishment::ext::multilink::MultiLink; +use crate::{ + unicast::{ + lowlatency::transport::TransportUnicastLowlatency, + transport_unicast_inner::{InitTransportError, TransportUnicastTrait}, + universal::transport::TransportUnicastUniversal, + TransportConfigUnicast, TransportUnicast, + }, + TransportManager, TransportPeer, +}; + /*************************************/ /* TRANSPORT CONFIG */ /*************************************/ diff --git a/io/zenoh-transport/src/unicast/mod.rs b/io/zenoh-transport/src/unicast/mod.rs index 630b56aa1b..1726ba2559 100644 --- a/io/zenoh-transport/src/unicast/mod.rs +++ b/io/zenoh-transport/src/unicast/mod.rs @@ -21,26 +21,28 @@ pub(crate) mod universal; #[cfg(feature = "test")] pub mod test_helpers; -#[cfg(feature = "shared-memory")] -use crate::shm::TransportShmConfig; - -use self::transport_unicast_inner::TransportUnicastTrait; +use std::{ + fmt, + sync::{Arc, Weak}, +}; -use super::{TransportPeer, TransportPeerEventHandler}; #[cfg(feature = "transport_multilink")] use establishment::ext::auth::ZPublicKey; pub use manager::*; -use std::fmt; -use std::sync::{Arc, Weak}; use zenoh_core::zcondfeat; use zenoh_link::Link; -use zenoh_protocol::network::NetworkMessage; use zenoh_protocol::{ core::{Bits, WhatAmI, ZenohId}, + network::NetworkMessage, transport::{close, TransportSn}, }; use zenoh_result::{zerror, ZResult}; +use self::transport_unicast_inner::TransportUnicastTrait; +use super::{TransportPeer, TransportPeerEventHandler}; +#[cfg(feature = "shared-memory")] +use crate::shm::TransportShmConfig; + /*************************************/ /* TRANSPORT UNICAST */ /*************************************/ diff --git a/io/zenoh-transport/src/unicast/test_helpers.rs b/io/zenoh-transport/src/unicast/test_helpers.rs index 42ed6db927..6d25ae0d77 100644 --- a/io/zenoh-transport/src/unicast/test_helpers.rs +++ b/io/zenoh-transport/src/unicast/test_helpers.rs @@ -11,9 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{unicast::TransportManagerBuilderUnicast, TransportManager}; use zenoh_core::zcondfeat; +use crate::{unicast::TransportManagerBuilderUnicast, TransportManager}; + pub fn make_transport_manager_builder( #[cfg(feature = "transport_multilink")] max_links: usize, #[cfg(feature = "shared-memory")] with_shm: bool, diff --git 
a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs index f6dc39529d..fcc5d41029 100644 --- a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs +++ b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs @@ -12,12 +12,9 @@ // ZettaScale Zenoh Team, // -use crate::{ - unicast::{link::TransportLinkUnicast, TransportConfigUnicast}, - TransportPeerEventHandler, -}; -use async_trait::async_trait; use std::{fmt::DebugStruct, sync::Arc, time::Duration}; + +use async_trait::async_trait; use tokio::sync::MutexGuard as AsyncMutexGuard; use zenoh_link::Link; use zenoh_protocol::{ @@ -28,6 +25,10 @@ use zenoh_protocol::{ use zenoh_result::ZResult; use super::link::{LinkUnicastWithOpenAck, MaybeOpenAck}; +use crate::{ + unicast::{link::TransportLinkUnicast, TransportConfigUnicast}, + TransportPeerEventHandler, +}; pub(crate) type LinkError = (zenoh_result::Error, TransportLinkUnicast, u8); pub(crate) type TransportError = (zenoh_result::Error, Arc, u8); diff --git a/io/zenoh-transport/src/unicast/universal/link.rs b/io/zenoh-transport/src/unicast/universal/link.rs index 8d5d703be1..e0c3cd3db5 100644 --- a/io/zenoh-transport/src/unicast/universal/link.rs +++ b/io/zenoh-transport/src/unicast/universal/link.rs @@ -11,6 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::time::Duration; + +use tokio_util::{sync::CancellationToken, task::TaskTracker}; +use zenoh_buffers::ZSliceBuffer; +use zenoh_protocol::transport::{KeepAlive, TransportMessage}; +use zenoh_result::{zerror, ZResult}; +use zenoh_sync::{RecyclingObject, RecyclingObjectPool}; +#[cfg(feature = "stats")] +use {crate::common::stats::TransportStats, std::sync::Arc}; + use super::transport::TransportUnicastUniversal; use crate::{ common::{ @@ -23,14 +33,6 @@ use crate::{ }, unicast::link::{TransportLinkUnicast, TransportLinkUnicastRx, TransportLinkUnicastTx}, }; -use std::time::Duration; -use tokio_util::{sync::CancellationToken, task::TaskTracker}; -use zenoh_buffers::ZSliceBuffer; -use zenoh_protocol::transport::{KeepAlive, TransportMessage}; -use zenoh_result::{zerror, ZResult}; -use zenoh_sync::{RecyclingObject, RecyclingObjectPool}; -#[cfg(feature = "stats")] -use {crate::common::stats::TransportStats, std::sync::Arc}; #[derive(Clone)] pub(super) struct TransportLinkUnicastUniversal { diff --git a/io/zenoh-transport/src/unicast/universal/rx.rs b/io/zenoh-transport/src/unicast/universal/rx.rs index 3edf57f507..f97f29b0c7 100644 --- a/io/zenoh-transport/src/unicast/universal/rx.rs +++ b/io/zenoh-transport/src/unicast/universal/rx.rs @@ -11,16 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::transport::TransportUnicastUniversal; -use crate::{ - common::{ - batch::{Decode, RBatch}, - priority::TransportChannelRx, - }, - unicast::transport_unicast_inner::TransportUnicastTrait, - TransportPeerEventHandler, -}; use std::sync::MutexGuard; + use zenoh_core::{zlock, zread}; use zenoh_link::Link; use zenoh_protocol::{ @@ -30,6 +22,16 @@ use zenoh_protocol::{ }; use zenoh_result::{bail, zerror, ZResult}; +use super::transport::TransportUnicastUniversal; +use crate::{ + common::{ + batch::{Decode, RBatch}, + priority::TransportChannelRx, + }, + unicast::transport_unicast_inner::TransportUnicastTrait, + TransportPeerEventHandler, +}; + /*************************************/ /* TRANSPORT RX */ /*************************************/ diff --git a/io/zenoh-transport/src/unicast/universal/transport.rs 
b/io/zenoh-transport/src/unicast/universal/transport.rs index 5f581673e9..52b4769e82 100644 --- a/io/zenoh-transport/src/unicast/universal/transport.rs +++ b/io/zenoh-transport/src/unicast/universal/transport.rs @@ -11,6 +11,23 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{ + fmt::DebugStruct, + sync::{Arc, RwLock}, + time::Duration, +}; + +use async_trait::async_trait; +use tokio::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; +use zenoh_core::{zasynclock, zcondfeat, zread, zwrite}; +use zenoh_link::Link; +use zenoh_protocol::{ + core::{Priority, WhatAmI, ZenohId}, + network::NetworkMessage, + transport::{close, Close, PrioritySn, TransportMessage, TransportSn}, +}; +use zenoh_result::{bail, zerror, ZResult}; + #[cfg(feature = "stats")] use crate::stats::TransportStats; use crate::{ @@ -23,19 +40,6 @@ use crate::{ }, TransportManager, TransportPeerEventHandler, }; -use async_trait::async_trait; -use std::fmt::DebugStruct; -use std::sync::{Arc, RwLock}; -use std::time::Duration; -use tokio::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; -use zenoh_core::{zasynclock, zcondfeat, zread, zwrite}; -use zenoh_link::Link; -use zenoh_protocol::{ - core::{Priority, WhatAmI, ZenohId}, - network::NetworkMessage, - transport::{close, Close, PrioritySn, TransportMessage, TransportSn}, -}; -use zenoh_result::{bail, zerror, ZResult}; macro_rules! zlinkget { ($guard:expr, $link:expr) => { diff --git a/io/zenoh-transport/src/unicast/universal/tx.rs b/io/zenoh-transport/src/unicast/universal/tx.rs index a381bb4d29..f7754489ef 100644 --- a/io/zenoh-transport/src/unicast/universal/tx.rs +++ b/io/zenoh-transport/src/unicast/universal/tx.rs @@ -11,10 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::transport::TransportUnicastUniversal; use zenoh_core::zread; use zenoh_protocol::network::NetworkMessage; +use super::transport::TransportUnicastUniversal; #[cfg(feature = "shared-memory")] use crate::shm::map_zmsg_to_partner; diff --git a/io/zenoh-transport/tests/endpoints.rs b/io/zenoh-transport/tests/endpoints.rs index daf79d3e98..e765165a81 100644 --- a/io/zenoh-transport/tests/endpoints.rs +++ b/io/zenoh-transport/tests/endpoints.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::{any::Any, convert::TryFrom, sync::Arc, time::Duration}; + use zenoh_core::ztimeout; use zenoh_link::{EndPoint, Link}; use zenoh_protocol::{ diff --git a/io/zenoh-transport/tests/multicast_compression.rs b/io/zenoh-transport/tests/multicast_compression.rs index d5eb62c961..124dfeaad8 100644 --- a/io/zenoh-transport/tests/multicast_compression.rs +++ b/io/zenoh-transport/tests/multicast_compression.rs @@ -24,6 +24,7 @@ mod tests { }, time::Duration, }; + use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ diff --git a/io/zenoh-transport/tests/multicast_transport.rs b/io/zenoh-transport/tests/multicast_transport.rs index d0bb603836..e1d5bfc52c 100644 --- a/io/zenoh-transport/tests/multicast_transport.rs +++ b/io/zenoh-transport/tests/multicast_transport.rs @@ -25,6 +25,7 @@ mod tests { }, time::Duration, }; + use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ diff --git a/io/zenoh-transport/tests/transport_whitelist.rs b/io/zenoh-transport/tests/transport_whitelist.rs index a859a1c0c9..4ace68a87b 100644 --- a/io/zenoh-transport/tests/transport_whitelist.rs +++ b/io/zenoh-transport/tests/transport_whitelist.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::{any::Any, convert::TryFrom, iter::FromIterator, sync::Arc, 
time::Duration}; + use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ diff --git a/io/zenoh-transport/tests/unicast_authenticator.rs b/io/zenoh-transport/tests/unicast_authenticator.rs index abcf011eed..b25fb77a63 100644 --- a/io/zenoh-transport/tests/unicast_authenticator.rs +++ b/io/zenoh-transport/tests/unicast_authenticator.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::{any::Any, sync::Arc, time::Duration}; + use zenoh_core::{zasyncwrite, ztimeout}; use zenoh_link::Link; use zenoh_protocol::{ @@ -20,11 +21,9 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; use zenoh_transport::{ - multicast::TransportMulticast, unicast::establishment::ext::auth::Auth, - TransportMulticastEventHandler, -}; -use zenoh_transport::{ - unicast::TransportUnicast, DummyTransportPeerEventHandler, TransportEventHandler, + multicast::TransportMulticast, + unicast::{establishment::ext::auth::Auth, TransportUnicast}, + DummyTransportPeerEventHandler, TransportEventHandler, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; diff --git a/io/zenoh-transport/tests/unicast_compression.rs b/io/zenoh-transport/tests/unicast_compression.rs index 6f80e7dd58..df122eeedd 100644 --- a/io/zenoh-transport/tests/unicast_compression.rs +++ b/io/zenoh-transport/tests/unicast_compression.rs @@ -13,16 +13,17 @@ // #[cfg(feature = "transport_compression")] mod tests { - use std::fmt::Write as _; use std::{ any::Any, convert::TryFrom, + fmt::Write as _, sync::{ atomic::{AtomicUsize, Ordering}, Arc, }, time::Duration, }; + use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ diff --git a/io/zenoh-transport/tests/unicast_concurrent.rs b/io/zenoh-transport/tests/unicast_concurrent.rs index dc4c0fbd3d..9c9b58acde 100644 --- a/io/zenoh-transport/tests/unicast_concurrent.rs +++ b/io/zenoh-transport/tests/unicast_concurrent.rs @@ -10,11 +10,16 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::any::Any; -use std::convert::TryFrom; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Arc; -use std::time::Duration; +use std::{ + any::Any, + convert::TryFrom, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; + use tokio::sync::Barrier; use zenoh_core::ztimeout; use zenoh_link::Link; diff --git a/io/zenoh-transport/tests/unicast_defragmentation.rs b/io/zenoh-transport/tests/unicast_defragmentation.rs index 40a513b874..28b085ab39 100644 --- a/io/zenoh-transport/tests/unicast_defragmentation.rs +++ b/io/zenoh-transport/tests/unicast_defragmentation.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::{convert::TryFrom, sync::Arc, time::Duration}; + use zenoh_core::ztimeout; use zenoh_protocol::{ core::{ diff --git a/io/zenoh-transport/tests/unicast_intermittent.rs b/io/zenoh-transport/tests/unicast_intermittent.rs index 14670bf532..9830820cf1 100644 --- a/io/zenoh-transport/tests/unicast_intermittent.rs +++ b/io/zenoh-transport/tests/unicast_intermittent.rs @@ -11,12 +11,17 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::any::Any; -use std::convert::TryFrom; -use std::io::Write; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Arc; -use std::time::Duration; +use std::{ + any::Any, + convert::TryFrom, + io::Write, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; + use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ diff --git a/io/zenoh-transport/tests/unicast_multilink.rs b/io/zenoh-transport/tests/unicast_multilink.rs index 
c06485fd06..d9337b790d 100644 --- a/io/zenoh-transport/tests/unicast_multilink.rs +++ b/io/zenoh-transport/tests/unicast_multilink.rs @@ -14,6 +14,7 @@ #[cfg(feature = "transport_multilink")] mod tests { use std::{convert::TryFrom, sync::Arc, time::Duration}; + use zenoh_core::ztimeout; use zenoh_link::EndPoint; use zenoh_protocol::core::{WhatAmI, ZenohId}; diff --git a/io/zenoh-transport/tests/unicast_openclose.rs b/io/zenoh-transport/tests/unicast_openclose.rs index 3c46fc9a80..03af046a3d 100644 --- a/io/zenoh-transport/tests/unicast_openclose.rs +++ b/io/zenoh-transport/tests/unicast_openclose.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::{convert::TryFrom, sync::Arc, time::Duration}; + use zenoh_core::ztimeout; use zenoh_link::EndPoint; use zenoh_protocol::core::{WhatAmI, ZenohId}; @@ -22,7 +23,6 @@ use zenoh_transport::{ DummyTransportPeerEventHandler, TransportEventHandler, TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; - #[cfg(target_os = "linux")] #[cfg(any(feature = "transport_tcp", feature = "transport_udp"))] use zenoh_util::net::get_ipv4_ipaddrs; diff --git a/io/zenoh-transport/tests/unicast_priorities.rs b/io/zenoh-transport/tests/unicast_priorities.rs index fa7f68a8a9..c7e468b5c5 100644 --- a/io/zenoh-transport/tests/unicast_priorities.rs +++ b/io/zenoh-transport/tests/unicast_priorities.rs @@ -11,15 +11,19 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::any::Any; -use std::convert::TryFrom; -use std::fmt::Write as _; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Arc; -use std::time::Duration; +use std::{ + any::Any, + convert::TryFrom, + fmt::Write as _, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; + use zenoh_core::ztimeout; use zenoh_link::Link; -use zenoh_protocol::network::NetworkBody; use zenoh_protocol::{ core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohId}, network::{ @@ -27,7 +31,7 @@ use zenoh_protocol::{ ext::{NodeIdType, QoSType}, Push, }, - NetworkMessage, + NetworkBody, NetworkMessage, }, zenoh::Put, }; diff --git a/io/zenoh-transport/tests/unicast_shm.rs b/io/zenoh-transport/tests/unicast_shm.rs index 637f9f8a86..f7b884f6b9 100644 --- a/io/zenoh-transport/tests/unicast_shm.rs +++ b/io/zenoh-transport/tests/unicast_shm.rs @@ -22,6 +22,7 @@ mod tests { }, time::Duration, }; + use zenoh_buffers::buffer::SplitBuffer; use zenoh_core::ztimeout; use zenoh_link::Link; diff --git a/io/zenoh-transport/tests/unicast_simultaneous.rs b/io/zenoh-transport/tests/unicast_simultaneous.rs index 92267458f0..8f9b23a6f1 100644 --- a/io/zenoh-transport/tests/unicast_simultaneous.rs +++ b/io/zenoh-transport/tests/unicast_simultaneous.rs @@ -13,11 +13,16 @@ // #[cfg(target_family = "unix")] mod tests { - use std::any::Any; - use std::convert::TryFrom; - use std::sync::atomic::{AtomicUsize, Ordering}; - use std::sync::Arc; - use std::time::Duration; + use std::{ + any::Any, + convert::TryFrom, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, + }; + use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ diff --git a/io/zenoh-transport/tests/unicast_time.rs b/io/zenoh-transport/tests/unicast_time.rs index 5b9209ada3..efe8842c12 100644 --- a/io/zenoh-transport/tests/unicast_time.rs +++ b/io/zenoh-transport/tests/unicast_time.rs @@ -16,6 +16,7 @@ use std::{ sync::Arc, time::{Duration, Instant}, }; + use zenoh_core::ztimeout; use zenoh_link::EndPoint; use zenoh_protocol::core::{WhatAmI, ZenohId}; diff --git 
a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index 4b833bc5e7..8fed09e8c2 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -11,16 +11,17 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::fmt::Write as _; use std::{ any::Any, convert::TryFrom, + fmt::Write as _, sync::{ atomic::{AtomicUsize, Ordering}, Arc, }, time::Duration, }; + use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ @@ -1158,6 +1159,7 @@ async fn transport_unicast_tls_only_mutual_success() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unicast_tls_only_mutual_no_client_certs_failure() { use std::vec; + use zenoh_link::tls::config::*; zenoh_util::try_init_log_from_env(); @@ -1373,6 +1375,7 @@ async fn transport_unicast_quic_only_mutual_success() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unicast_quic_only_mutual_no_client_certs_failure() { use std::vec; + use zenoh_link::quic::config::*; zenoh_util::try_init_log_from_env(); diff --git a/plugins/zenoh-backend-example/src/lib.rs b/plugins/zenoh-backend-example/src/lib.rs index 3663f3249e..13cc427268 100644 --- a/plugins/zenoh-backend-example/src/lib.rs +++ b/plugins/zenoh-backend-example/src/lib.rs @@ -11,9 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::collections::{hash_map::Entry, HashMap}; + use async_std::sync::RwLock; use async_trait::async_trait; -use std::collections::{hash_map::Entry, HashMap}; use zenoh::{key_expr::OwnedKeyExpr, time::Timestamp, value::Value}; use zenoh_backend_traits::{ config::{StorageConfig, VolumeConfig}, diff --git a/plugins/zenoh-backend-traits/src/config.rs b/plugins/zenoh-backend-traits/src/config.rs index 096255fb59..9f5e9bb25a 100644 --- a/plugins/zenoh-backend-traits/src/config.rs +++ b/plugins/zenoh-backend-traits/src/config.rs @@ -11,13 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{convert::TryFrom, time::Duration}; + use const_format::concatcp; use derive_more::{AsMut, AsRef}; use schemars::JsonSchema; use serde_json::{Map, Value}; -use std::convert::TryFrom; -use std::time::Duration; -use zenoh::{core::Result as ZResult, key_expr::keyexpr, key_expr::OwnedKeyExpr}; +use zenoh::{ + core::Result as ZResult, + key_expr::{keyexpr, OwnedKeyExpr}, +}; use zenoh_plugin_trait::{PluginStartArgs, StructVersion}; use zenoh_result::{bail, zerror, Error}; diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index 761f653064..61d70b28b1 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -123,10 +123,12 @@ use async_trait::async_trait; use const_format::concatcp; -use zenoh::core::Result as ZResult; -use zenoh::key_expr::{keyexpr, OwnedKeyExpr}; -use zenoh::time::Timestamp; -use zenoh::value::Value; +use zenoh::{ + core::Result as ZResult, + key_expr::{keyexpr, OwnedKeyExpr}, + time::Timestamp, + value::Value, +}; use zenoh_plugin_trait::{PluginControl, PluginInstance, PluginStatusRec, StructVersion}; use zenoh_util::concat_enabled_features; diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 3c84e039a8..41e88fb417 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -13,20 +13,25 @@ // #![recursion_limit = "256"] -use futures::select; -use std::borrow::Cow; -use std::collections::HashMap; -use 
std::convert::TryFrom; -use std::sync::{ - atomic::{AtomicBool, Ordering::Relaxed}, - Arc, Mutex, +use std::{ + borrow::Cow, + collections::HashMap, + convert::TryFrom, + sync::{ + atomic::{AtomicBool, Ordering::Relaxed}, + Arc, Mutex, + }, }; + +use futures::select; use tracing::{debug, info}; -use zenoh::key_expr::{keyexpr, KeyExpr}; -use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; -use zenoh::runtime::Runtime; -use zenoh::sample::Sample; -use zenoh::session::SessionDeclarations; +use zenoh::{ + key_expr::{keyexpr, KeyExpr}, + plugins::{RunningPluginTrait, ZenohPlugin}, + runtime::Runtime, + sample::Sample, + session::SessionDeclarations, +}; use zenoh_core::zlock; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; use zenoh_result::{bail, ZResult}; diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index 59562391ea..c1c8f69ce7 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -11,14 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::{arg, Command}; use std::time::Duration; -use zenoh::config::Config; -use zenoh::core::try_init_log_from_env; -use zenoh::key_expr::keyexpr; -use zenoh::publication::CongestionControl; -use zenoh::sample::QoSBuilderTrait; -use zenoh::session::SessionDeclarations; + +use clap::{arg, Command}; +use zenoh::{ + config::Config, core::try_init_log_from_env, key_expr::keyexpr, publication::CongestionControl, + sample::QoSBuilderTrait, session::SessionDeclarations, +}; const HTML: &str = r#"
diff --git a/plugins/zenoh-plugin-rest/src/config.rs b/plugins/zenoh-plugin-rest/src/config.rs index 56b9960467..719dc79fbf 100644 --- a/plugins/zenoh-plugin-rest/src/config.rs +++ b/plugins/zenoh-plugin-rest/src/config.rs @@ -11,11 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // -use schemars::JsonSchema; -use serde::de::{Unexpected, Visitor}; -use serde::{de, Deserialize, Deserializer}; use std::fmt; +use schemars::JsonSchema; +use serde::{ + de, + de::{Unexpected, Visitor}, + Deserialize, Deserializer, +}; + const DEFAULT_HTTP_INTERFACE: &str = "[::]"; #[derive(JsonSchema, Deserialize, serde::Serialize, Clone, Debug)] diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index c712a1add6..4dd30f9a5f 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -17,29 +17,27 @@ //! This crate is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) +use std::{borrow::Cow, convert::TryFrom, str::FromStr, sync::Arc}; + use async_std::prelude::FutureExt; use base64::Engine; use futures::StreamExt; use http_types::Method; use serde::{Deserialize, Serialize}; -use std::borrow::Cow; -use std::convert::TryFrom; -use std::str::FromStr; -use std::sync::Arc; -use tide::http::Mime; -use tide::sse::Sender; -use tide::{Request, Response, Server, StatusCode}; -use zenoh::bytes::{StringOrBase64, ZBytes}; -use zenoh::core::try_init_log_from_env; -use zenoh::encoding::Encoding; -use zenoh::key_expr::{keyexpr, KeyExpr}; -use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; -use zenoh::query::{QueryConsolidation, Reply}; -use zenoh::runtime::Runtime; -use zenoh::sample::{Sample, SampleKind, ValueBuilderTrait}; -use zenoh::selector::{Selector, TIME_RANGE_KEY}; -use zenoh::session::{Session, SessionDeclarations}; -use zenoh::value::Value; +use tide::{http::Mime, sse::Sender, Request, Response, Server, StatusCode}; +use zenoh::{ + bytes::{StringOrBase64, ZBytes}, + core::try_init_log_from_env, + encoding::Encoding, + key_expr::{keyexpr, KeyExpr}, + plugins::{RunningPluginTrait, ZenohPlugin}, + query::{QueryConsolidation, Reply}, + runtime::Runtime, + sample::{Sample, SampleKind, ValueBuilderTrait}, + selector::{Selector, TIME_RANGE_KEY}, + session::{Session, SessionDeclarations}, + value::Value, +}; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; use zenoh_result::{bail, zerror, ZResult}; diff --git a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs index dcdefda406..1bb8af4330 100644 --- a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs @@ -11,13 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::storages_mgt::*; -use flume::Sender; use std::sync::Arc; -use zenoh::core::Result as ZResult; -use zenoh::session::Session; -use zenoh_backend_traits::config::StorageConfig; -use zenoh_backend_traits::{Capability, VolumeInstance}; + +use flume::Sender; +use zenoh::{core::Result as ZResult, session::Session}; +use zenoh_backend_traits::{config::StorageConfig, Capability, VolumeInstance}; + +use super::storages_mgt::*; pub struct StoreIntercept { pub storage: Box, diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 8818d44688..3415f6db65 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ 
b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -19,34 +19,32 @@ //! [Click here for Zenoh's documentation](../zenoh/index.html) #![recursion_limit = "512"] +use std::{ + collections::HashMap, + convert::TryFrom, + sync::{Arc, Mutex}, +}; + use async_std::task; use flume::Sender; use memory_backend::MemoryBackend; -use std::collections::HashMap; -use std::convert::TryFrom; -use std::sync::Arc; -use std::sync::Mutex; use storages_mgt::StorageMessage; -use zenoh::core::try_init_log_from_env; -use zenoh::core::Result as ZResult; -use zenoh::internal::zlock; -use zenoh::internal::LibLoader; -use zenoh::key_expr::keyexpr; -use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; -use zenoh::runtime::Runtime; -use zenoh::selector::Selector; -use zenoh::session::Session; -use zenoh_backend_traits::config::ConfigDiff; -use zenoh_backend_traits::config::PluginConfig; -use zenoh_backend_traits::config::StorageConfig; -use zenoh_backend_traits::config::VolumeConfig; -use zenoh_backend_traits::VolumeInstance; -use zenoh_plugin_trait::plugin_long_version; -use zenoh_plugin_trait::plugin_version; -use zenoh_plugin_trait::Plugin; -use zenoh_plugin_trait::PluginControl; -use zenoh_plugin_trait::PluginReport; -use zenoh_plugin_trait::PluginStatusRec; +use zenoh::{ + core::{try_init_log_from_env, Result as ZResult}, + internal::{zlock, LibLoader}, + key_expr::keyexpr, + plugins::{RunningPluginTrait, ZenohPlugin}, + runtime::Runtime, + selector::Selector, + session::Session, +}; +use zenoh_backend_traits::{ + config::{ConfigDiff, PluginConfig, StorageConfig, VolumeConfig}, + VolumeInstance, +}; +use zenoh_plugin_trait::{ + plugin_long_version, plugin_version, Plugin, PluginControl, PluginReport, PluginStatusRec, +}; mod backends_mgt; use backends_mgt::*; diff --git a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs index 60982c350d..1534d95e32 100644 --- a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs @@ -11,16 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{collections::HashMap, sync::Arc}; + use async_std::sync::RwLock; use async_trait::async_trait; -use std::collections::HashMap; -use std::sync::Arc; -use zenoh::core::Result as ZResult; -use zenoh::key_expr::OwnedKeyExpr; -use zenoh::time::Timestamp; -use zenoh::value::Value; -use zenoh_backend_traits::config::{StorageConfig, VolumeConfig}; -use zenoh_backend_traits::*; +use zenoh::{core::Result as ZResult, key_expr::OwnedKeyExpr, time::Timestamp, value::Value}; +use zenoh_backend_traits::{ + config::{StorageConfig, VolumeConfig}, + *, +}; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin}; use crate::MEMORY_BACKEND_NAME; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 694e259a18..50c93fe3dd 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -11,15 +11,18 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::digest::*; -use super::Snapshotter; +use std::{ + cmp::Ordering, + collections::{BTreeSet, HashMap, HashSet}, + str, + str::FromStr, +}; + use async_std::sync::Arc; -use std::cmp::Ordering; -use std::collections::{BTreeSet, HashMap, HashSet}; -use std::str; -use std::str::FromStr; use zenoh::prelude::*; +use super::{digest::*, 
Snapshotter}; + pub struct AlignQueryable { session: Arc, digest_key: OwnedKeyExpr, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 46ccdc2935..e0301f1a4e 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -12,14 +12,17 @@ // ZettaScale Zenoh Team, // -use super::{Digest, EraType, LogEntry, Snapshotter}; -use super::{CONTENTS, ERA, INTERVALS, SUBINTERVALS}; +use std::{ + collections::{HashMap, HashSet}, + str, +}; + use async_std::sync::{Arc, RwLock}; use flume::{Receiver, Sender}; -use std::collections::{HashMap, HashSet}; -use std::str; use zenoh::prelude::*; +use super::{Digest, EraType, LogEntry, Snapshotter, CONTENTS, ERA, INTERVALS, SUBINTERVALS}; + pub struct Aligner { session: Arc, digest_key: OwnedKeyExpr, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs b/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs index c70f26ea1f..bf06c61f25 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs @@ -12,16 +12,18 @@ // ZettaScale Zenoh Team, // +use std::{ + collections::{BTreeMap, BTreeSet, HashMap, HashSet}, + convert::TryFrom, + str::FromStr, + string::ParseError, + time::Duration, +}; + use crc::{Crc, CRC_64_ECMA_182}; use derive_new::new; use serde::{Deserialize, Serialize}; -use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; -use std::convert::TryFrom; -use std::str::FromStr; -use std::string::ParseError; -use std::time::Duration; -use zenoh::key_expr::OwnedKeyExpr; -use zenoh::time::Timestamp; +use zenoh::{key_expr::OwnedKeyExpr, time::Timestamp}; #[derive(Eq, PartialEq, Clone, Debug, Deserialize, Serialize)] pub struct DigestConfig { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index c9d9e03bcf..421d45ade6 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -14,21 +14,25 @@ // This module extends Storage with alignment protocol that aligns storages subscribing to the same key_expr -use crate::backends_mgt::StoreIntercept; -use crate::storages_mgt::StorageMessage; -use async_std::stream::{interval, StreamExt}; -use async_std::sync::Arc; -use async_std::sync::RwLock; +use std::{ + collections::{HashMap, HashSet}, + str, + str::FromStr, + time::{Duration, SystemTime}, +}; + +use async_std::{ + stream::{interval, StreamExt}, + sync::{Arc, RwLock}, +}; use flume::{Receiver, Sender}; use futures::{pin_mut, select, FutureExt}; -use std::collections::{HashMap, HashSet}; -use std::str; -use std::str::FromStr; -use std::time::{Duration, SystemTime}; use urlencoding::encode; use zenoh::prelude::*; use zenoh_backend_traits::config::{ReplicaConfig, StorageConfig}; +use crate::{backends_mgt::StoreIntercept, storages_mgt::StorageMessage}; + pub mod align_queryable; pub mod aligner; pub mod digest; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs b/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs index e66a6e88ca..d5708686ee 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs @@ -11,20 +11,24 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{Digest, DigestConfig, LogEntry}; -use 
async_std::stream::{interval, StreamExt}; -use async_std::sync::Arc; -use async_std::sync::RwLock; -use async_std::task::sleep; +use std::{ + collections::{HashMap, HashSet}, + convert::TryFrom, + time::Duration, +}; + +use async_std::{ + stream::{interval, StreamExt}, + sync::{Arc, RwLock}, + task::sleep, +}; use flume::Receiver; use futures::join; -use std::collections::{HashMap, HashSet}; -use std::convert::TryFrom; -use std::time::Duration; -use zenoh::key_expr::OwnedKeyExpr; -use zenoh::time::Timestamp; +use zenoh::{key_expr::OwnedKeyExpr, time::Timestamp}; use zenoh_backend_traits::config::ReplicaConfig; +use super::{Digest, DigestConfig, LogEntry}; + pub struct Snapshotter { // channel to get updates from the storage storage_update: Receiver<(OwnedKeyExpr, Timestamp)>, diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 476893539e..bd7d56f7fc 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -11,35 +11,39 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::backends_mgt::StoreIntercept; -use crate::storages_mgt::StorageMessage; -use async_std::sync::Arc; -use async_std::sync::{Mutex, RwLock}; +use std::{ + collections::{HashMap, HashSet}, + str::{self, FromStr}, + time::{SystemTime, UNIX_EPOCH}, +}; + +use async_std::sync::{Arc, Mutex, RwLock}; use async_trait::async_trait; use flume::{Receiver, Sender}; use futures::select; -use std::collections::{HashMap, HashSet}; -use std::str::{self, FromStr}; -use std::time::{SystemTime, UNIX_EPOCH}; -use zenoh::buffers::SplitBuffer; -use zenoh::buffers::ZBuf; -use zenoh::internal::bail; -use zenoh::internal::{zenoh_home, Timed, TimedEvent, Timer}; -use zenoh::key_expr::keyexpr_tree::KeyedSetProvider; -use zenoh::key_expr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut}; -use zenoh::key_expr::keyexpr_tree::{KeBoxTree, NonWild, UnknownWildness}; -use zenoh::key_expr::KeyExpr; -use zenoh::key_expr::OwnedKeyExpr; -use zenoh::query::{ConsolidationMode, QueryTarget}; -use zenoh::sample::{Sample, SampleKind, TimestampBuilderTrait}; -use zenoh::sample::{SampleBuilder, ValueBuilderTrait}; -use zenoh::selector::Selector; -use zenoh::session::SessionDeclarations; -use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; -use zenoh::value::Value; -use zenoh::{core::Result as ZResult, session::Session}; -use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; -use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; +use zenoh::{ + buffers::{SplitBuffer, ZBuf}, + core::Result as ZResult, + internal::{bail, zenoh_home, Timed, TimedEvent, Timer}, + key_expr::{ + keyexpr_tree::{ + IKeyExprTree, IKeyExprTreeMut, KeBoxTree, KeyedSetProvider, NonWild, UnknownWildness, + }, + KeyExpr, OwnedKeyExpr, + }, + query::{ConsolidationMode, QueryTarget}, + sample::{Sample, SampleBuilder, SampleKind, TimestampBuilderTrait, ValueBuilderTrait}, + selector::Selector, + session::{Session, SessionDeclarations}, + time::{new_reception_timestamp, Timestamp, NTP64}, + value::Value, +}; +use zenoh_backend_traits::{ + config::{GarbageCollectionConfig, StorageConfig}, + Capability, History, Persistence, StorageInsertionResult, StoredData, +}; + +use crate::{backends_mgt::StoreIntercept, storages_mgt::StorageMessage}; pub const WILDCARD_UPDATES_FILENAME: &str = "wildcard_updates"; pub const TOMBSTONE_FILENAME: &str = "tombstones"; diff 
--git a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs index a77cdd936f..1670310fcf 100644 --- a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs @@ -12,8 +12,7 @@ // ZettaScale Zenoh Team, // use async_std::sync::Arc; -use zenoh::core::Result as ZResult; -use zenoh::session::Session; +use zenoh::{core::Result as ZResult, session::Session}; use zenoh_backend_traits::config::StorageConfig; pub use super::replica::{Replica, StorageService}; diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index 61ea53deba..dd20c71936 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -16,12 +16,10 @@ // 1. normal case, just some wild card puts and deletes on existing keys and ensure it works // 2. check for dealing with out of order updates -use std::str::FromStr; -use std::thread::sleep; +use std::{str::FromStr, thread::sleep}; use async_std::task; -use zenoh::internal::zasync_executor_init; -use zenoh::prelude::*; +use zenoh::{internal::zasync_executor_init, prelude::*}; use zenoh_plugin_trait::Plugin; async fn put_data(session: &Session, key_expr: &str, value: &str, _timestamp: Timestamp) { diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index f2482da8e5..8bafeb9bbe 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -16,13 +16,11 @@ // 1. normal case, just some wild card puts and deletes on existing keys and ensure it works // 2. check for dealing with out of order updates -use std::str::FromStr; -use std::thread::sleep; +use std::{str::FromStr, thread::sleep}; // use std::collections::HashMap; use async_std::task; -use zenoh::internal::zasync_executor_init; -use zenoh::prelude::*; +use zenoh::{internal::zasync_executor_init, prelude::*}; use zenoh_plugin_trait::Plugin; async fn put_data(session: &Session, key_expr: &str, value: &str, _timestamp: Timestamp) { diff --git a/plugins/zenoh-plugin-trait/src/manager.rs b/plugins/zenoh-plugin-trait/src/manager.rs index a205c3972d..2f5336d1fc 100644 --- a/plugins/zenoh-plugin-trait/src/manager.rs +++ b/plugins/zenoh-plugin-trait/src/manager.rs @@ -13,7 +13,6 @@ mod dynamic_plugin; mod static_plugin; -use crate::*; use zenoh_keyexpr::keyexpr; use zenoh_result::ZResult; use zenoh_util::LibLoader; @@ -22,6 +21,7 @@ use self::{ dynamic_plugin::{DynamicPlugin, DynamicPluginSource}, static_plugin::StaticPlugin, }; +use crate::*; pub trait DeclaredPlugin: PluginStatus { fn as_status(&self) -> &dyn PluginStatus; diff --git a/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs b/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs index a8a78306ea..89a0032fc1 100644 --- a/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs +++ b/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs @@ -10,13 +10,14 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::*; use std::path::{Path, PathBuf}; use libloading::Library; use zenoh_result::{bail, ZResult}; use zenoh_util::LibLoader; +use crate::*; + /// This enum contains information where to load the plugin from. 
pub enum DynamicPluginSource { /// Load plugin with the name in String + `.so | .dll | .dylib` diff --git a/plugins/zenoh-plugin-trait/src/manager/static_plugin.rs b/plugins/zenoh-plugin-trait/src/manager/static_plugin.rs index c275fb9818..3841f50a86 100644 --- a/plugins/zenoh-plugin-trait/src/manager/static_plugin.rs +++ b/plugins/zenoh-plugin-trait/src/manager/static_plugin.rs @@ -11,10 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::*; use std::marker::PhantomData; + use zenoh_result::ZResult; +use crate::*; + pub struct StaticPlugin where P: Plugin, diff --git a/plugins/zenoh-plugin-trait/src/plugin.rs b/plugins/zenoh-plugin-trait/src/plugin.rs index 6911d614d5..703f4fb0b1 100644 --- a/plugins/zenoh-plugin-trait/src/plugin.rs +++ b/plugins/zenoh-plugin-trait/src/plugin.rs @@ -11,12 +11,14 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::StructVersion; -use serde::{Deserialize, Serialize}; use std::{borrow::Cow, ops::BitOrAssign}; + +use serde::{Deserialize, Serialize}; use zenoh_keyexpr::keyexpr; use zenoh_result::ZResult; +use crate::StructVersion; + /// The plugin can be in one of these states: /// - Declared: the plugin is declared in the configuration file, but not loaded yet or failed to load /// - Loaded: the plugin is loaded, but not started yet or failed to start diff --git a/zenoh-ext/examples/examples/z_member.rs b/zenoh-ext/examples/examples/z_member.rs index 35513b1b56..783ee97a9e 100644 --- a/zenoh-ext/examples/examples/z_member.rs +++ b/zenoh-ext/examples/examples/z_member.rs @@ -11,9 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{sync::Arc, time::Duration}; + use futures::StreamExt; -use std::sync::Arc; -use std::time::Duration; use zenoh::prelude::*; use zenoh_ext::group::*; diff --git a/zenoh-ext/examples/examples/z_pub_cache.rs b/zenoh-ext/examples/examples/z_pub_cache.rs index 09c888cb0b..684cc7cb75 100644 --- a/zenoh-ext/examples/examples/z_pub_cache.rs +++ b/zenoh-ext/examples/examples/z_pub_cache.rs @@ -11,10 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::{arg, Parser}; use std::time::Duration; -use zenoh::config::{Config, ModeDependentValue}; -use zenoh::prelude::*; + +use clap::{arg, Parser}; +use zenoh::{ + config::{Config, ModeDependentValue}, + prelude::*, +}; use zenoh_ext::*; use zenoh_ext_examples::CommonArgs; diff --git a/zenoh-ext/examples/examples/z_query_sub.rs b/zenoh-ext/examples/examples/z_query_sub.rs index a735ecec66..2fa077eba1 100644 --- a/zenoh-ext/examples/examples/z_query_sub.rs +++ b/zenoh-ext/examples/examples/z_query_sub.rs @@ -11,10 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::arg; -use clap::Parser; -use zenoh::config::Config; -use zenoh::prelude::*; +use clap::{arg, Parser}; +use zenoh::{config::Config, prelude::*}; use zenoh_ext::*; use zenoh_ext_examples::CommonArgs; diff --git a/zenoh-ext/examples/examples/z_view_size.rs b/zenoh-ext/examples/examples/z_view_size.rs index 52e78790bb..fd8220d506 100644 --- a/zenoh-ext/examples/examples/z_view_size.rs +++ b/zenoh-ext/examples/examples/z_view_size.rs @@ -11,9 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{sync::Arc, time::Duration}; + use clap::{arg, Parser}; -use std::sync::Arc; -use std::time::Duration; use zenoh::config::Config; use zenoh_ext::group::*; use zenoh_ext_examples::CommonArgs; diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index d764e5ed9c..44600b038c 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -14,18 +14,22 @@ //! 
To manage groups and group memeberships +use std::{ + collections::HashMap, + convert::TryInto, + ops::Add, + sync::Arc, + time::{Duration, Instant}, +}; + use flume::{Receiver, Sender}; -use futures::prelude::*; -use futures::select; +use futures::{prelude::*, select}; use serde::{Deserialize, Serialize}; -use std::collections::HashMap; -use std::convert::TryInto; -use std::ops::Add; -use std::sync::Arc; -use std::time::{Duration, Instant}; use tokio::sync::Mutex; -use zenoh::internal::{bail, Condition, TaskController}; -use zenoh::prelude::*; +use zenoh::{ + internal::{bail, Condition, TaskController}, + prelude::*, +}; const GROUP_PREFIX: &str = "zenoh/ext/net/group"; const EVENT_POSTFIX: &str = "evt"; diff --git a/zenoh-ext/src/lib.rs b/zenoh-ext/src/lib.rs index 41eea0b074..9802d04e3a 100644 --- a/zenoh-ext/src/lib.rs +++ b/zenoh-ext/src/lib.rs @@ -21,11 +21,8 @@ pub use querying_subscriber::{ FetchingSubscriber, FetchingSubscriberBuilder, QueryingSubscriberBuilder, }; pub use session_ext::SessionExt; -pub use subscriber_ext::SubscriberBuilderExt; -pub use subscriber_ext::SubscriberForward; -use zenoh::internal::zerror; -use zenoh::query::Reply; -use zenoh::{core::Result as ZResult, sample::Sample}; +pub use subscriber_ext::{SubscriberBuilderExt, SubscriberForward}; +use zenoh::{core::Result as ZResult, internal::zerror, query::Reply, sample::Sample}; /// The space of keys to use in a [`FetchingSubscriber`]. pub enum KeySpace { diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 11fb8fb72a..1796668f1c 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -11,21 +11,24 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::collections::{HashMap, VecDeque}; -use std::convert::TryInto; -use std::future::{IntoFuture, Ready}; -use std::time::Duration; -use zenoh::core::Error; -use zenoh::core::{Resolvable, Resolve}; -use zenoh::internal::{ResolveFuture, TerminatableTask}; -use zenoh::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; -use zenoh::prelude::Wait; -use zenoh::queryable::{Query, Queryable}; -use zenoh::runtime::ZRuntime; -use zenoh::sample::{Locality, Sample}; -use zenoh::session::{SessionDeclarations, SessionRef}; -use zenoh::subscriber::FlumeSubscriber; -use zenoh::{core::Result as ZResult, internal::bail}; +use std::{ + collections::{HashMap, VecDeque}, + convert::TryInto, + future::{IntoFuture, Ready}, + time::Duration, +}; + +use zenoh::{ + core::{Error, Resolvable, Resolve, Result as ZResult}, + internal::{bail, ResolveFuture, TerminatableTask}, + key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}, + prelude::Wait, + queryable::{Query, Queryable}, + runtime::ZRuntime, + sample::{Locality, Sample}, + session::{SessionDeclarations, SessionRef}, + subscriber::FlumeSubscriber, +}; /// The builder of PublicationCache, allowing to configure it. 
#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 6febef7395..e76c6f7f5c 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -11,24 +11,28 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::collections::{btree_map, BTreeMap, VecDeque}; -use std::convert::TryInto; -use std::future::{IntoFuture, Ready}; -use std::mem::swap; -use std::sync::{Arc, Mutex}; -use std::time::Duration; -use zenoh::core::{Resolvable, Resolve}; -use zenoh::handlers::{locked, DefaultHandler, IntoHandler}; -use zenoh::internal::zlock; -use zenoh::key_expr::KeyExpr; -use zenoh::prelude::Wait; -use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; -use zenoh::sample::{Locality, Sample, SampleBuilder, TimestampBuilderTrait}; -use zenoh::selector::Selector; -use zenoh::session::{SessionDeclarations, SessionRef}; -use zenoh::subscriber::{Reliability, Subscriber}; -use zenoh::time::{new_reception_timestamp, Timestamp}; -use zenoh::{core::Error, core::Result as ZResult}; +use std::{ + collections::{btree_map, BTreeMap, VecDeque}, + convert::TryInto, + future::{IntoFuture, Ready}, + mem::swap, + sync::{Arc, Mutex}, + time::Duration, +}; + +use zenoh::{ + core::{Error, Resolvable, Resolve, Result as ZResult}, + handlers::{locked, DefaultHandler, IntoHandler}, + internal::zlock, + key_expr::KeyExpr, + prelude::Wait, + query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}, + sample::{Locality, Sample, SampleBuilder, TimestampBuilderTrait}, + selector::Selector, + session::{SessionDeclarations, SessionRef}, + subscriber::{Reliability, Subscriber}, + time::{new_reception_timestamp, Timestamp}, +}; use crate::ExtractSample; diff --git a/zenoh-ext/src/session_ext.rs b/zenoh-ext/src/session_ext.rs index d005cafc86..3b33bc9b16 100644 --- a/zenoh-ext/src/session_ext.rs +++ b/zenoh-ext/src/session_ext.rs @@ -11,15 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::PublicationCacheBuilder; -use std::convert::TryInto; -use std::sync::Arc; +use std::{convert::TryInto, sync::Arc}; + use zenoh::{ core::Error, key_expr::KeyExpr, session::{Session, SessionRef}, }; +use super::PublicationCacheBuilder; + /// Some extensions to the [`zenoh::Session`](zenoh::Session) pub trait SessionExt<'s, 'a> { fn declare_publication_cache<'b, 'c, TryIntoKeyExpr>( diff --git a/zenoh-ext/src/subscriber_ext.rs b/zenoh-ext/src/subscriber_ext.rs index 8c3b1239b6..81c969a223 100644 --- a/zenoh-ext/src/subscriber_ext.rs +++ b/zenoh-ext/src/subscriber_ext.rs @@ -11,21 +11,21 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::time::Duration; + use flume::r#async::RecvStream; use futures::stream::{Forward, Map}; -use std::time::Duration; -use zenoh::core::Result as ZResult; -use zenoh::query::ReplyKeyExpr; -use zenoh::sample::Locality; use zenoh::{ + core::Result as ZResult, liveliness::LivelinessSubscriberBuilder, - query::{QueryConsolidation, QueryTarget}, - sample::Sample, + query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}, + sample::{Locality, Sample}, subscriber::{Reliability, Subscriber, SubscriberBuilder}, }; -use crate::ExtractSample; -use crate::{querying_subscriber::QueryingSubscriberBuilder, FetchingSubscriberBuilder}; +use crate::{ + querying_subscriber::QueryingSubscriberBuilder, ExtractSample, FetchingSubscriberBuilder, +}; /// Allows writing `subscriber.forward(receiver)` instead of 
`subscriber.stream().map(Ok).forward(publisher)` pub trait SubscriberForward<'a, S> { diff --git a/zenoh/src/api/admin.rs b/zenoh/src/api/admin.rs index e720fde1c3..6e7605e95b 100644 --- a/zenoh/src/api/admin.rs +++ b/zenoh/src/api/admin.rs @@ -11,20 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{ - bytes::ZBytes, - encoding::Encoding, - key_expr::KeyExpr, - queryable::Query, - sample::Locality, - sample::{DataInfo, SampleKind}, - session::Session, -}; use std::{ collections::hash_map::DefaultHasher, hash::{Hash, Hasher}, sync::Arc, }; + use zenoh_core::{Result as ZResult, Wait}; use zenoh_keyexpr::keyexpr; use zenoh_protocol::{core::WireExpr, network::NetworkMessage}; @@ -32,6 +24,15 @@ use zenoh_transport::{ TransportEventHandler, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; +use super::{ + bytes::ZBytes, + encoding::Encoding, + key_expr::KeyExpr, + queryable::Query, + sample::{DataInfo, Locality, SampleKind}, + session::Session, +}; + macro_rules! ke_for_sure { ($val:expr) => { unsafe { keyexpr::from_str_unchecked($val) } diff --git a/zenoh/src/api/builders/publication.rs b/zenoh/src/api/builders/publication.rs index 5285825b29..d4dc1b54d2 100644 --- a/zenoh/src/api/builders/publication.rs +++ b/zenoh/src/api/builders/publication.rs @@ -11,24 +11,27 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::api::builders::sample::SampleBuilderTrait; -use crate::api::builders::sample::{QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait}; +use std::future::{IntoFuture, Ready}; + +use zenoh_core::{Resolvable, Result as ZResult, Wait}; +use zenoh_protocol::{core::CongestionControl, network::Mapping}; + #[cfg(feature = "unstable")] use crate::api::bytes::OptionZBytes; -use crate::api::bytes::ZBytes; -use crate::api::key_expr::KeyExpr; -use crate::api::publication::Priority; -use crate::api::sample::Locality; -use crate::api::sample::SampleKind; #[cfg(feature = "unstable")] use crate::api::sample::SourceInfo; -use crate::api::session::SessionRef; -use crate::api::value::Value; -use crate::api::{encoding::Encoding, publication::Publisher}; -use std::future::{IntoFuture, Ready}; -use zenoh_core::{Resolvable, Result as ZResult, Wait}; -use zenoh_protocol::core::CongestionControl; -use zenoh_protocol::network::Mapping; +use crate::api::{ + builders::sample::{ + QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, + }, + bytes::ZBytes, + encoding::Encoding, + key_expr::KeyExpr, + publication::{Priority, Publisher}, + sample::{Locality, SampleKind}, + session::SessionRef, + value::Value, +}; pub type SessionPutBuilder<'a, 'b> = PublicationBuilder, PublicationBuilderPut>; diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs index 0335949b82..56ae8c6c1b 100644 --- a/zenoh/src/api/builders/sample.rs +++ b/zenoh/src/api/builders/sample.rs @@ -11,22 +11,23 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::api::bytes::ZBytes; -use crate::api::encoding::Encoding; -use crate::api::key_expr::KeyExpr; -use crate::api::publication::Priority; -use crate::api::sample::QoS; -use crate::api::sample::QoSBuilder; -use crate::api::sample::Sample; -use crate::api::sample::SampleKind; -use crate::api::value::Value; -#[cfg(feature = "unstable")] -use crate::{api::bytes::OptionZBytes, sample::SourceInfo}; use std::marker::PhantomData; + use uhlc::Timestamp; use zenoh_core::zresult; use zenoh_protocol::core::CongestionControl; +use crate::api::{ + bytes::ZBytes, + encoding::Encoding, + 
key_expr::KeyExpr, + publication::Priority, + sample::{QoS, QoSBuilder, Sample, SampleKind}, + value::Value, +}; +#[cfg(feature = "unstable")] +use crate::{api::bytes::OptionZBytes, sample::SourceInfo}; + pub trait QoSBuilderTrait { /// Change the `congestion_control` to apply when routing the data. fn congestion_control(self, congestion_control: CongestionControl) -> Self; diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index c36136ef81..fb32910b54 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -13,11 +13,11 @@ // //! ZBytes primitives. -use crate::buffers::ZBuf; use std::{ borrow::Cow, convert::Infallible, fmt::Debug, marker::PhantomData, ops::Deref, str::Utf8Error, string::FromUtf8Error, sync::Arc, }; + use unwrap_infallible::UnwrapInfallible; use zenoh_buffers::{ buffer::{Buffer, SplitBuffer}, @@ -37,6 +37,8 @@ use zenoh_shm::{ SharedMemoryBuf, }; +use crate::buffers::ZBuf; + /// Trait to encode a type `T` into a [`Value`]. pub trait Serialize { type Output; @@ -1825,12 +1827,11 @@ impl From> for ZBytes { mod tests { #[test] fn serializer() { - use super::ZBytes; - use rand::Rng; use std::borrow::Cow; + + use rand::Rng; use zenoh_buffers::{ZBuf, ZSlice}; use zenoh_protocol::core::Properties; - #[cfg(all(feature = "shared-memory", feature = "unstable"))] use zenoh_shm::api::{ protocol_implementations::posix::{ @@ -1841,6 +1842,8 @@ mod tests { slice::zsliceshm::{zsliceshm, ZSliceShm}, }; + use super::ZBytes; + const NUM: usize = 1_000; macro_rules! serialize_deserialize { diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs index 6c08303612..f1be92c7ac 100644 --- a/zenoh/src/api/encoding.rs +++ b/zenoh/src/api/encoding.rs @@ -11,14 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::bytes::ZBytes; -use phf::phf_map; use std::{borrow::Cow, convert::Infallible, fmt, str::FromStr}; + +use phf::phf_map; use zenoh_buffers::{ZBuf, ZSlice}; use zenoh_protocol::core::EncodingId; #[cfg(feature = "shared-memory")] use zenoh_shm::api::slice::{zsliceshm::ZSliceShm, zsliceshmmut::ZSliceShmMut}; +use super::bytes::ZBytes; + /// Default encoding values used by Zenoh. /// /// An encoding has a similar role to Content-type in HTTP: it indicates, when present, how data should be interpreted by the application. diff --git a/zenoh/src/api/handlers/ring.rs b/zenoh/src/api/handlers/ring.rs index 755c6d9bce..77ad867d36 100644 --- a/zenoh/src/api/handlers/ring.rs +++ b/zenoh/src/api/handlers/ring.rs @@ -13,13 +13,14 @@ // //! Callback handler trait. -use crate::api::session::API_DATA_RECEPTION_CHANNEL_SIZE; - -use super::{callback::Callback, Dyn, IntoHandler}; use std::sync::{Arc, Weak}; + use zenoh_collections::RingBuffer; use zenoh_result::ZResult; +use super::{callback::Callback, Dyn, IntoHandler}; +use crate::api::session::API_DATA_RECEPTION_CHANNEL_SIZE; + /// A synchrounous ring channel with a limited size that allows users to keep the last N data. pub struct RingChannel { capacity: usize, diff --git a/zenoh/src/api/info.rs b/zenoh/src/api/info.rs index 205a412142..0c75252a78 100644 --- a/zenoh/src/api/info.rs +++ b/zenoh/src/api/info.rs @@ -13,11 +13,13 @@ // //! Tools to access information about the current zenoh [`Session`](crate::Session). 
-use super::session::SessionRef; use std::future::{IntoFuture, Ready}; + use zenoh_core::{Resolvable, Wait}; use zenoh_protocol::core::{WhatAmI, ZenohId}; +use super::session::SessionRef; + /// A builder retuned by [`SessionInfo::zid()`](SessionInfo::zid) that allows /// to access the [`ZenohId`] of the current zenoh [`Session`](crate::Session). /// diff --git a/zenoh/src/api/key_expr.rs b/zenoh/src/api/key_expr.rs index 20dcf9cbee..c5fdf12609 100644 --- a/zenoh/src/api/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -12,14 +12,12 @@ // ZettaScale Zenoh Team, // -use super::session::{Session, Undeclarable}; -use crate::net::primitives::Primitives; -use std::future::IntoFuture; use std::{ convert::{TryFrom, TryInto}, - future::Ready, + future::{IntoFuture, Ready}, str::FromStr, }; + use zenoh_core::{Resolvable, Wait}; use zenoh_keyexpr::{keyexpr, OwnedKeyExpr}; use zenoh_protocol::{ @@ -28,6 +26,9 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; +use super::session::{Session, Undeclarable}; +use crate::net::primitives::Primitives; + #[derive(Clone, Debug)] pub(crate) enum KeyExprInner<'a> { Borrowed(&'a keyexpr), diff --git a/zenoh/src/api/liveliness.rs b/zenoh/src/api/liveliness.rs index f7235426c3..640c639dec 100644 --- a/zenoh/src/api/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -12,6 +12,18 @@ // ZettaScale Zenoh Team, // +use std::{ + convert::TryInto, + future::{IntoFuture, Ready}, + sync::Arc, + time::Duration, +}; + +use zenoh_config::unwrap_or_default; +use zenoh_core::{Resolvable, Resolve, Result as ZResult, Wait}; +use zenoh_keyexpr::keyexpr; +use zenoh_protocol::network::{declare::subscriber::ext::SubscriberInfo, request}; + use super::{ handlers::{locked, DefaultHandler, IntoHandler}, key_expr::KeyExpr, @@ -21,13 +33,6 @@ use super::{ subscriber::{Subscriber, SubscriberInner}, Id, }; -use std::future::IntoFuture; -use std::{convert::TryInto, future::Ready, sync::Arc, time::Duration}; -use zenoh_config::unwrap_or_default; -use zenoh_core::{Resolvable, Result as ZResult}; -use zenoh_core::{Resolve, Wait}; -use zenoh_keyexpr::keyexpr; -use zenoh_protocol::network::{declare::subscriber::ext::SubscriberInfo, request}; #[zenoh_macros::unstable] pub(crate) static PREFIX_LIVELINESS: &str = crate::net::routing::PREFIX_LIVELINESS; diff --git a/zenoh/src/api/loader.rs b/zenoh/src/api/loader.rs index e4a28de02e..ad4dac61fb 100644 --- a/zenoh/src/api/loader.rs +++ b/zenoh/src/api/loader.rs @@ -11,11 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::plugins::{PluginsManager, PLUGIN_PREFIX}; -use crate::runtime::Runtime; use zenoh_config::{Config, PluginLoad}; use zenoh_result::ZResult; +use super::plugins::{PluginsManager, PLUGIN_PREFIX}; +use crate::runtime::Runtime; + pub(crate) fn load_plugin( plugin_mgr: &mut PluginsManager, name: &str, diff --git a/zenoh/src/api/plugins.rs b/zenoh/src/api/plugins.rs index 27f6f18d7a..b7f1954a6b 100644 --- a/zenoh/src/api/plugins.rs +++ b/zenoh/src/api/plugins.rs @@ -14,8 +14,6 @@ //! `zenohd`'s plugin system. For more details, consult the [detailed documentation](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Plugins/Zenoh%20Plugins.md). 
-use super::selector::Selector; -use crate::net::runtime::Runtime; use zenoh_core::zconfigurable; use zenoh_plugin_trait::{ Plugin, PluginControl, PluginInstance, PluginReport, PluginStatusRec, StructVersion, @@ -23,6 +21,9 @@ use zenoh_plugin_trait::{ use zenoh_protocol::core::key_expr::keyexpr; use zenoh_result::ZResult; +use super::selector::Selector; +use crate::net::runtime::Runtime; + zconfigurable! { pub static ref PLUGIN_PREFIX: String = "zenoh_plugin_".to_string(); } diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 518ddc4d1b..553170e76a 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -12,26 +12,14 @@ // ZettaScale Zenoh Team, // -use super::{ - builders::publication::{ - PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, - PublisherDeleteBuilder, PublisherPutBuilder, - }, - bytes::ZBytes, - encoding::Encoding, - key_expr::KeyExpr, - sample::{DataInfo, Locality, QoS, Sample, SampleFields, SampleKind}, - session::{SessionRef, Undeclarable}, -}; -use crate::net::primitives::Primitives; -use futures::Sink; -use std::future::IntoFuture; use std::{ convert::TryFrom, - future::Ready, + future::{IntoFuture, Ready}, pin::Pin, task::{Context, Poll}, }; + +use futures::Sink; use zenoh_core::{zread, Resolvable, Resolve, Wait}; use zenoh_keyexpr::keyexpr; use zenoh_protocol::{ @@ -40,7 +28,6 @@ use zenoh_protocol::{ zenoh::{Del, PushBody, Put}, }; use zenoh_result::{Error, ZResult}; - #[zenoh_macros::unstable] use { crate::api::handlers::{Callback, DefaultHandler, IntoHandler}, @@ -50,6 +37,19 @@ use { zenoh_protocol::core::EntityId, }; +use super::{ + builders::publication::{ + PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, + PublisherDeleteBuilder, PublisherPutBuilder, + }, + bytes::ZBytes, + encoding::Encoding, + key_expr::KeyExpr, + sample::{DataInfo, Locality, QoS, Sample, SampleFields, SampleKind}, + session::{SessionRef, Undeclarable}, +}; +use crate::net::primitives::Primitives; + #[zenoh_macros::unstable] #[derive(Clone)] pub enum PublisherRef<'a> { @@ -1087,16 +1087,19 @@ impl Drop for MatchingListenerInner<'_> { #[cfg(test)] mod tests { - use crate::api::{sample::SampleKind, session::SessionDeclarations}; use zenoh_config::Config; use zenoh_core::Wait; + use crate::api::{sample::SampleKind, session::SessionDeclarations}; + #[test] fn priority_from() { - use super::Priority as APrio; use std::convert::TryInto; + use zenoh_protocol::core::Priority as TPrio; + use super::Priority as APrio; + for i in APrio::MAX as u8..=APrio::MIN as u8 { let p: APrio = i.try_into().unwrap(); diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index 311402b618..e344237087 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -12,6 +12,19 @@ // ZettaScale Zenoh Team, // +use std::{ + collections::HashMap, + future::{IntoFuture, Ready}, + time::Duration, +}; + +use zenoh_core::{Resolvable, Wait}; +use zenoh_keyexpr::OwnedKeyExpr; +use zenoh_protocol::core::{CongestionControl, ZenohId}; +use zenoh_result::ZResult; + +#[zenoh_macros::unstable] +use super::{builders::sample::SampleBuilderTrait, bytes::OptionZBytes, sample::SourceInfo}; use super::{ builders::sample::{QoSBuilderTrait, ValueBuilderTrait}, bytes::ZBytes, @@ -24,15 +37,6 @@ use super::{ session::Session, value::Value, }; -use std::future::IntoFuture; -use std::{collections::HashMap, future::Ready, time::Duration}; -use zenoh_core::{Resolvable, Wait}; -use zenoh_keyexpr::OwnedKeyExpr; -use 
zenoh_protocol::core::{CongestionControl, ZenohId}; -use zenoh_result::ZResult; - -#[zenoh_macros::unstable] -use super::{builders::sample::SampleBuilderTrait, bytes::OptionZBytes, sample::SourceInfo}; /// The [`Queryable`](crate::queryable::Queryable)s that should be target of a [`get`](Session::get). pub type QueryTarget = zenoh_protocol::network::request::ext::TargetType; diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index c83b4b6081..e2343811db 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -11,27 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{ - builders::sample::{QoSBuilderTrait, SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}, - bytes::ZBytes, - encoding::Encoding, - handlers::{locked, DefaultHandler, IntoHandler}, - key_expr::KeyExpr, - publication::Priority, - sample::{Locality, QoSBuilder, Sample, SampleKind}, - selector::{Parameters, Selector}, - session::{SessionRef, Undeclarable}, - value::Value, - Id, -}; -use crate::net::primitives::Primitives; -use std::future::IntoFuture; use std::{ fmt, - future::Ready, + future::{IntoFuture, Ready}, ops::{Deref, DerefMut}, sync::Arc, }; + use uhlc::Timestamp; use zenoh_core::{Resolvable, Resolve, Wait}; use zenoh_protocol::{ @@ -40,7 +26,6 @@ use zenoh_protocol::{ zenoh::{self, reply::ReplyBody, Del, Put, ResponseBody}, }; use zenoh_result::ZResult; - #[zenoh_macros::unstable] use { super::{ @@ -50,6 +35,21 @@ use { zenoh_protocol::core::EntityGlobalId, }; +use super::{ + builders::sample::{QoSBuilderTrait, SampleBuilder, TimestampBuilderTrait, ValueBuilderTrait}, + bytes::ZBytes, + encoding::Encoding, + handlers::{locked, DefaultHandler, IntoHandler}, + key_expr::KeyExpr, + publication::Priority, + sample::{Locality, QoSBuilder, Sample, SampleKind}, + selector::{Parameters, Selector}, + session::{SessionRef, Undeclarable}, + value::Value, + Id, +}; +use crate::net::primitives::Primitives; + pub(crate) struct QueryInner { /// The key expression of this Query. pub(crate) key_expr: KeyExpr<'static>, diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index ca2354db85..2551a2a0d9 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -13,18 +13,20 @@ // //! Sample primitives -use super::{ - builders::sample::QoSBuilderTrait, bytes::ZBytes, encoding::Encoding, key_expr::KeyExpr, - publication::Priority, value::Value, -}; +use std::{convert::TryFrom, fmt}; + #[cfg(feature = "unstable")] use serde::Serialize; -use std::{convert::TryFrom, fmt}; use zenoh_protocol::{ core::{CongestionControl, EntityGlobalId, Timestamp}, network::declare::ext::QoSType, }; +use super::{ + builders::sample::QoSBuilderTrait, bytes::ZBytes, encoding::Encoding, key_expr::KeyExpr, + publication::Priority, value::Value, +}; + pub type SourceSn = u64; /// The locality of samples to be received by subscribers or targeted by publishers. 
@@ -150,9 +152,10 @@ pub struct SourceInfo { #[test] #[cfg(feature = "unstable")] fn source_info_stack_size() { - use crate::api::sample::{SourceInfo, SourceSn}; use zenoh_protocol::core::ZenohId; + use crate::api::sample::{SourceInfo, SourceSn}; + assert_eq!(std::mem::size_of::(), 16); assert_eq!(std::mem::size_of::>(), 17); assert_eq!(std::mem::size_of::>(), 16); diff --git a/zenoh/src/api/scouting.rs b/zenoh/src/api/scouting.rs index 8e7853a411..8963d37e30 100644 --- a/zenoh/src/api/scouting.rs +++ b/zenoh/src/api/scouting.rs @@ -11,17 +11,25 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::api::handlers::{locked, Callback, DefaultHandler, IntoHandler}; -use crate::net::runtime::{orchestrator::Loop, Runtime}; -use std::future::IntoFuture; -use std::time::Duration; -use std::{fmt, future::Ready, net::SocketAddr, ops::Deref}; +use std::{ + fmt, + future::{IntoFuture, Ready}, + net::SocketAddr, + ops::Deref, + time::Duration, +}; + use tokio::net::UdpSocket; use zenoh_core::{Resolvable, Wait}; use zenoh_protocol::{core::WhatAmIMatcher, scouting::Hello}; use zenoh_result::ZResult; use zenoh_task::TerminatableTask; +use crate::{ + api::handlers::{locked, Callback, DefaultHandler, IntoHandler}, + net::runtime::{orchestrator::Loop, Runtime}, +}; + /// A builder for initializing a [`Scout`]. /// /// # Examples diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs index 59e52edc62..2dc77dc967 100644 --- a/zenoh/src/api/selector.rs +++ b/zenoh/src/api/selector.rs @@ -13,13 +13,13 @@ // //! [Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries -use super::{key_expr::KeyExpr, queryable::Query}; use std::{ collections::HashMap, convert::TryFrom, ops::{Deref, DerefMut}, str::FromStr, }; + use zenoh_protocol::core::{ key_expr::{keyexpr, OwnedKeyExpr}, Properties, @@ -29,6 +29,8 @@ use zenoh_result::ZResult; #[cfg(feature = "unstable")] use zenoh_util::time_range::TimeRange; +use super::{key_expr::KeyExpr, queryable::Query}; + /// A selector is the combination of a [Key Expression](crate::prelude::KeyExpr), which defines the /// set of keys that are relevant to an operation, and a set of parameters /// with a few intendend uses: diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 703fca2e9d..2e718ecccb 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -11,37 +11,11 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{ - admin, - builders::publication::{ - PublicationBuilderDelete, PublicationBuilderPut, PublisherBuilder, SessionDeleteBuilder, - SessionPutBuilder, - }, - bytes::ZBytes, - encoding::Encoding, - handlers::{Callback, DefaultHandler}, - info::SessionInfo, - key_expr::{KeyExpr, KeyExprInner}, - publication::Priority, - query::{ConsolidationMode, GetBuilder, QueryConsolidation, QueryState, QueryTarget, Reply}, - queryable::{Query, QueryInner, QueryableBuilder, QueryableState}, - sample::{DataInfo, DataInfoIntoSample, Locality, QoS, Sample, SampleKind}, - selector::{Selector, TIME_RANGE_KEY}, - subscriber::{SubscriberBuilder, SubscriberState}, - value::Value, - Id, -}; -use crate::net::{ - primitives::Primitives, - routing::dispatcher::face::Face, - runtime::{Runtime, RuntimeBuilder}, -}; -use std::future::IntoFuture; use std::{ collections::HashMap, convert::{TryFrom, TryInto}, fmt, - future::Ready, + future::{IntoFuture, Ready}, ops::Deref, sync::{ atomic::{AtomicU16, Ordering}, @@ -49,6 +23,7 @@ use std::{ }, time::Duration, }; + use tracing::{error, trace, warn}; use 
uhlc::HLC; use zenoh_buffers::ZBuf; @@ -82,6 +57,26 @@ use zenoh_result::ZResult; use zenoh_shm::api::client_storage::SharedMemoryClientStorage; use zenoh_task::TaskController; +use super::{ + admin, + builders::publication::{ + PublicationBuilderDelete, PublicationBuilderPut, PublisherBuilder, SessionDeleteBuilder, + SessionPutBuilder, + }, + bytes::ZBytes, + encoding::Encoding, + handlers::{Callback, DefaultHandler}, + info::SessionInfo, + key_expr::{KeyExpr, KeyExprInner}, + publication::Priority, + query::{ConsolidationMode, GetBuilder, QueryConsolidation, QueryState, QueryTarget, Reply}, + queryable::{Query, QueryInner, QueryableBuilder, QueryableState}, + sample::{DataInfo, DataInfoIntoSample, Locality, QoS, Sample, SampleKind}, + selector::{Selector, TIME_RANGE_KEY}, + subscriber::{SubscriberBuilder, SubscriberState}, + value::Value, + Id, +}; #[cfg(feature = "unstable")] use super::{ liveliness::{Liveliness, LivelinessTokenState}, @@ -90,6 +85,11 @@ use super::{ query::_REPLY_KEY_EXPR_ANY_SEL_PARAM, sample::SourceInfo, }; +use crate::net::{ + primitives::Primitives, + routing::dispatcher::face::Face, + runtime::{Runtime, RuntimeBuilder}, +}; zconfigurable! { pub(crate) static ref API_DATA_RECEPTION_CHANNEL_SIZE: usize = 256; diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index 0c4e21b547..ba345f5116 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -12,26 +12,26 @@ // ZettaScale Zenoh Team, // -use super::{ - handlers::{locked, Callback, DefaultHandler, IntoHandler}, - key_expr::KeyExpr, - sample::{Locality, Sample}, - session::{SessionRef, Undeclarable}, - Id, -}; -use std::future::IntoFuture; use std::{ fmt, - future::Ready, + future::{IntoFuture, Ready}, ops::{Deref, DerefMut}, sync::Arc, }; + use zenoh_core::{Resolvable, Wait}; +#[cfg(feature = "unstable")] +use zenoh_protocol::core::EntityGlobalId; use zenoh_protocol::{core::Reliability, network::declare::subscriber::ext::SubscriberInfo}; use zenoh_result::ZResult; -#[cfg(feature = "unstable")] -use zenoh_protocol::core::EntityGlobalId; +use super::{ + handlers::{locked, Callback, DefaultHandler, IntoHandler}, + key_expr::KeyExpr, + sample::{Locality, Sample}, + session::{SessionRef, Undeclarable}, + Id, +}; pub(crate) struct SubscriberState { pub(crate) id: Id, diff --git a/zenoh/src/api/time.rs b/zenoh/src/api/time.rs index 5d0d06765d..cbdabe3a7e 100644 --- a/zenoh/src/api/time.rs +++ b/zenoh/src/api/time.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::convert::TryFrom; + use zenoh_protocol::core::{Timestamp, TimestampId}; /// Generates a reception [`Timestamp`] with id=0x01. diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 71ab3a72e8..bb2bccb869 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -110,8 +110,7 @@ pub const FEATURES: &str = zenoh_util::concat_enabled_features!( ); // Expose some functions directly to root `zenoh::`` namespace for convenience -pub use crate::api::scouting::scout; -pub use crate::api::session::open; +pub use crate::api::{scouting::scout, session::open}; pub mod prelude; @@ -119,27 +118,24 @@ pub mod prelude; pub mod core { #[allow(deprecated)] pub use zenoh_core::AsyncResolve; - pub use zenoh_core::Resolvable; - pub use zenoh_core::Resolve; #[allow(deprecated)] pub use zenoh_core::SyncResolve; - pub use zenoh_core::Wait; + pub use zenoh_core::{Resolvable, Resolve, Wait}; /// A zenoh error. pub use zenoh_result::Error; /// A zenoh result. 
pub use zenoh_result::ZResult as Result; - pub use zenoh_util::core::zresult::ErrNo; - pub use zenoh_util::try_init_log_from_env; + pub use zenoh_util::{core::zresult::ErrNo, try_init_log_from_env}; } /// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate /// reading and writing data. pub mod buffers { - pub use zenoh_buffers::buffer::SplitBuffer; - pub use zenoh_buffers::reader::HasReader; - pub use zenoh_buffers::reader::Reader; - pub use zenoh_buffers::ZBufReader; - pub use zenoh_buffers::{ZBuf, ZSlice, ZSliceBuffer}; + pub use zenoh_buffers::{ + buffer::SplitBuffer, + reader::{HasReader, Reader}, + ZBuf, ZBufReader, ZSlice, ZSliceBuffer, + }; } /// [Key expression](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Key%20Expressions.md) are Zenoh's address space. @@ -174,18 +170,16 @@ pub mod buffers { /// as the [`keformat`] and [`kewrite`] macros will be able to tell you if you're attempting to set fields of the format that do not exist. pub mod key_expr { pub mod keyexpr_tree { - pub use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; pub use zenoh_keyexpr::keyexpr_tree::{ - support::NonWild, support::UnknownWildness, KeBoxTree, + impls::KeyedSetProvider, + support::{NonWild, UnknownWildness}, + IKeyExprTree, IKeyExprTreeMut, KeBoxTree, }; - pub use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut}; } - pub use crate::api::key_expr::KeyExpr; - pub use crate::api::key_expr::KeyExprUndeclaration; - pub use zenoh_keyexpr::keyexpr; - pub use zenoh_keyexpr::OwnedKeyExpr; - pub use zenoh_keyexpr::SetIntersectionLevel; + pub use zenoh_keyexpr::{keyexpr, OwnedKeyExpr, SetIntersectionLevel}; pub use zenoh_macros::{kedefine, keformat, kewrite}; + + pub use crate::api::key_expr::{KeyExpr, KeyExprUndeclaration}; // keyexpr format macro support pub mod format { pub use zenoh_keyexpr::format::*; @@ -197,20 +191,16 @@ pub mod key_expr { /// Zenoh [`Session`](crate::session::Session) and associated types pub mod session { - pub use crate::api::builders::publication::SessionDeleteBuilder; - pub use crate::api::builders::publication::SessionPutBuilder; #[zenoh_macros::unstable] #[doc(hidden)] pub use crate::api::session::init; - pub use crate::api::session::open; #[zenoh_macros::unstable] #[doc(hidden)] pub use crate::api::session::InitBuilder; - pub use crate::api::session::OpenBuilder; - pub use crate::api::session::Session; - pub use crate::api::session::SessionDeclarations; - pub use crate::api::session::SessionRef; - pub use crate::api::session::Undeclarable; + pub use crate::api::{ + builders::publication::{SessionDeleteBuilder, SessionPutBuilder}, + session::{open, OpenBuilder, Session, SessionDeclarations, SessionRef, Undeclarable}, + }; } /// Tools to access information about the current zenoh [`Session`](crate::Session). 
@@ -223,22 +213,17 @@ pub mod info { /// Sample primitives pub mod sample { - pub use crate::api::builders::sample::QoSBuilderTrait; - pub use crate::api::builders::sample::SampleBuilder; - pub use crate::api::builders::sample::SampleBuilderAny; - pub use crate::api::builders::sample::SampleBuilderDelete; - pub use crate::api::builders::sample::SampleBuilderPut; - pub use crate::api::builders::sample::SampleBuilderTrait; - pub use crate::api::builders::sample::TimestampBuilderTrait; - pub use crate::api::builders::sample::ValueBuilderTrait; #[zenoh_macros::unstable] pub use crate::api::sample::Locality; - pub use crate::api::sample::Sample; - pub use crate::api::sample::SampleFields; - pub use crate::api::sample::SampleKind; #[zenoh_macros::unstable] pub use crate::api::sample::SourceInfo; - pub use crate::api::sample::SourceSn; + pub use crate::api::{ + builders::sample::{ + QoSBuilderTrait, SampleBuilder, SampleBuilderAny, SampleBuilderDelete, + SampleBuilderPut, SampleBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait, + }, + sample::{Sample, SampleFields, SampleKind, SourceSn}, + }; } /// Value primitives @@ -253,42 +238,32 @@ pub mod encoding { /// Payload primitives pub mod bytes { - pub use crate::api::bytes::Deserialize; - pub use crate::api::bytes::OptionZBytes; - pub use crate::api::bytes::Serialize; - pub use crate::api::bytes::StringOrBase64; - pub use crate::api::bytes::ZBytes; - pub use crate::api::bytes::ZBytesIterator; - pub use crate::api::bytes::ZBytesReader; - pub use crate::api::bytes::ZBytesWriter; - pub use crate::api::bytes::ZDeserializeError; - pub use crate::api::bytes::ZSerde; + pub use crate::api::bytes::{ + Deserialize, OptionZBytes, Serialize, StringOrBase64, ZBytes, ZBytesIterator, ZBytesReader, + ZBytesWriter, ZDeserializeError, ZSerde, + }; } /// [Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries pub mod selector { - pub use crate::api::selector::Parameters; - pub use crate::api::selector::Selector; - pub use crate::api::selector::TIME_RANGE_KEY; pub use zenoh_protocol::core::Properties; pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; + + pub use crate::api::selector::{Parameters, Selector, TIME_RANGE_KEY}; } /// Subscribing primitives pub mod subscriber { - pub use crate::api::subscriber::FlumeSubscriber; - pub use crate::api::subscriber::Subscriber; - pub use crate::api::subscriber::SubscriberBuilder; /// The kind of reliability. 
pub use zenoh_protocol::core::Reliability; + + pub use crate::api::subscriber::{FlumeSubscriber, Subscriber, SubscriberBuilder}; } /// Publishing primitives pub mod publication { - pub use crate::api::builders::publication::PublicationBuilderDelete; - pub use crate::api::builders::publication::PublicationBuilderPut; - pub use crate::api::builders::publication::PublisherBuilder; - pub use crate::api::builders::publication::PublisherDeleteBuilder; + pub use zenoh_protocol::core::CongestionControl; + #[zenoh_macros::unstable] pub use crate::api::publication::MatchingListener; #[zenoh_macros::unstable] @@ -297,88 +272,81 @@ pub mod publication { pub use crate::api::publication::MatchingListenerUndeclaration; #[zenoh_macros::unstable] pub use crate::api::publication::MatchingStatus; - pub use crate::api::publication::Priority; - pub use crate::api::publication::Publisher; #[zenoh_macros::unstable] pub use crate::api::publication::PublisherDeclarations; #[zenoh_macros::unstable] pub use crate::api::publication::PublisherRef; - pub use crate::api::publication::PublisherUndeclaration; - pub use zenoh_protocol::core::CongestionControl; + pub use crate::api::{ + builders::publication::{ + PublicationBuilderDelete, PublicationBuilderPut, PublisherBuilder, + PublisherDeleteBuilder, + }, + publication::{Priority, Publisher, PublisherUndeclaration}, + }; } /// Query primitives pub mod query { - pub use crate::api::query::GetBuilder; - pub use crate::api::query::Reply; #[zenoh_macros::unstable] pub use crate::api::query::ReplyKeyExpr; #[zenoh_macros::unstable] pub use crate::api::query::REPLY_KEY_EXPR_ANY_SEL_PARAM; - pub use crate::api::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; + pub use crate::api::query::{ + ConsolidationMode, GetBuilder, QueryConsolidation, QueryTarget, Reply, + }; } /// Queryable primitives pub mod queryable { - pub use crate::api::queryable::Query; - pub use crate::api::queryable::Queryable; - pub use crate::api::queryable::QueryableBuilder; - pub use crate::api::queryable::QueryableUndeclaration; - pub use crate::api::queryable::ReplyBuilder; - pub use crate::api::queryable::ReplyBuilderDelete; - pub use crate::api::queryable::ReplyBuilderPut; - pub use crate::api::queryable::ReplyErrBuilder; #[zenoh_macros::unstable] pub use crate::api::queryable::ReplySample; + pub use crate::api::queryable::{ + Query, Queryable, QueryableBuilder, QueryableUndeclaration, ReplyBuilder, + ReplyBuilderDelete, ReplyBuilderPut, ReplyErrBuilder, + }; } /// Callback handler trait pub mod handlers { - pub use crate::api::handlers::locked; - pub use crate::api::handlers::Callback; - pub use crate::api::handlers::CallbackDrop; - pub use crate::api::handlers::DefaultHandler; - pub use crate::api::handlers::FifoChannel; - pub use crate::api::handlers::IntoHandler; - pub use crate::api::handlers::RingChannel; - pub use crate::api::handlers::RingChannelHandler; + pub use crate::api::handlers::{ + locked, Callback, CallbackDrop, DefaultHandler, FifoChannel, IntoHandler, RingChannel, + RingChannelHandler, + }; } /// Scouting primitives pub mod scouting { - pub use crate::api::scouting::scout; - pub use crate::api::scouting::Scout; - pub use crate::api::scouting::ScoutBuilder; /// Constants and helpers for zenoh `whatami` flags. pub use zenoh_protocol::core::WhatAmI; /// A zenoh Hello message. 
pub use zenoh_protocol::scouting::Hello; + + pub use crate::api::scouting::{scout, Scout, ScoutBuilder}; } /// Liveliness primitives #[cfg(feature = "unstable")] pub mod liveliness { - pub use crate::api::liveliness::Liveliness; - pub use crate::api::liveliness::LivelinessGetBuilder; - pub use crate::api::liveliness::LivelinessSubscriberBuilder; - pub use crate::api::liveliness::LivelinessToken; - pub use crate::api::liveliness::LivelinessTokenBuilder; - pub use crate::api::liveliness::LivelinessTokenUndeclaration; + pub use crate::api::liveliness::{ + Liveliness, LivelinessGetBuilder, LivelinessSubscriberBuilder, LivelinessToken, + LivelinessTokenBuilder, LivelinessTokenUndeclaration, + }; } /// Timestamp support pub mod time { - pub use crate::api::time::new_reception_timestamp; pub use zenoh_protocol::core::{Timestamp, TimestampId, NTP64}; + + pub use crate::api::time::new_reception_timestamp; } /// Initialize a Session with an existing Runtime. /// This operation is used by the plugins to share the same Runtime as the router. #[doc(hidden)] pub mod runtime { - pub use crate::net::runtime::RuntimeBuilder; - pub use crate::net::runtime::{AdminSpace, Runtime}; pub use zenoh_runtime::ZRuntime; + + pub use crate::net::runtime::{AdminSpace, Runtime, RuntimeBuilder}; } /// Configuration to pass to [`open`](crate::session::open) and [`scout`](crate::scouting::scout) functions and associated constants @@ -393,42 +361,39 @@ pub mod config { #[doc(hidden)] #[cfg(all(feature = "unstable", feature = "plugins"))] pub mod plugins { - pub use crate::api::plugins::PluginsManager; - pub use crate::api::plugins::Response; - pub use crate::api::plugins::RunningPlugin; - pub use crate::api::plugins::PLUGIN_PREFIX; - pub use crate::api::plugins::{RunningPluginTrait, ZenohPlugin}; + pub use crate::api::plugins::{ + PluginsManager, Response, RunningPlugin, RunningPluginTrait, ZenohPlugin, PLUGIN_PREFIX, + }; } #[doc(hidden)] pub mod internal { - pub use zenoh_core::zasync_executor_init; - pub use zenoh_core::zerror; - pub use zenoh_core::zlock; - pub use zenoh_core::ztimeout; + pub use zenoh_core::{zasync_executor_init, zerror, zlock, ztimeout}; pub use zenoh_result::bail; pub use zenoh_sync::Condition; - pub use zenoh_task::TaskController; - pub use zenoh_task::TerminatableTask; - pub use zenoh_util::core::ResolveFuture; - pub use zenoh_util::LibLoader; - pub use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer, ZENOH_HOME_ENV_VAR}; + pub use zenoh_task::{TaskController, TerminatableTask}; + pub use zenoh_util::{ + core::ResolveFuture, zenoh_home, LibLoader, Timed, TimedEvent, Timer, ZENOH_HOME_ENV_VAR, + }; } #[cfg(all(feature = "unstable", feature = "shared-memory"))] pub mod shm { - pub use zenoh_shm::api::client_storage::SharedMemoryClientStorage; - pub use zenoh_shm::api::provider::shared_memory_provider::{BlockOn, GarbageCollect}; - pub use zenoh_shm::api::provider::shared_memory_provider::{Deallocate, Defragment}; - pub use zenoh_shm::api::provider::types::AllocAlignment; - pub use zenoh_shm::api::provider::types::MemoryLayout; - pub use zenoh_shm::api::slice::zsliceshm::{zsliceshm, ZSliceShm}; - pub use zenoh_shm::api::slice::zsliceshmmut::{zsliceshmmut, ZSliceShmMut}; pub use zenoh_shm::api::{ + client_storage::SharedMemoryClientStorage, protocol_implementations::posix::{ posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, protocol_id::POSIX_PROTOCOL_ID, }, - provider::shared_memory_provider::SharedMemoryProviderBuilder, + provider::{ + shared_memory_provider::{ + BlockOn, 
Deallocate, Defragment, GarbageCollect, SharedMemoryProviderBuilder, + }, + types::{AllocAlignment, MemoryLayout}, + }, + slice::{ + zsliceshm::{zsliceshm, ZSliceShm}, + zsliceshmmut::{zsliceshmmut, ZSliceShmMut}, + }, }; } diff --git a/zenoh/src/net/codec/linkstate.rs b/zenoh/src/net/codec/linkstate.rs index 4954062a3d..a66163728c 100644 --- a/zenoh/src/net/codec/linkstate.rs +++ b/zenoh/src/net/codec/linkstate.rs @@ -11,12 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::Zenoh080Routing; -use crate::net::protocol::{ - linkstate, - linkstate::{LinkState, LinkStateList}, -}; use core::convert::TryFrom; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -27,6 +23,12 @@ use zenoh_protocol::{ core::{Locator, WhatAmI, ZenohId}, }; +use super::Zenoh080Routing; +use crate::net::protocol::{ + linkstate, + linkstate::{LinkState, LinkStateList}, +}; + // LinkState impl WCodec<&LinkState, &mut W> for Zenoh080Routing where diff --git a/zenoh/src/net/primitives/demux.rs b/zenoh/src/net/primitives/demux.rs index e58e01a1b5..b400d1a254 100644 --- a/zenoh/src/net/primitives/demux.rs +++ b/zenoh/src/net/primitives/demux.rs @@ -11,18 +11,19 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{any::Any, sync::Arc}; + +use zenoh_link::Link; +use zenoh_protocol::network::{NetworkBody, NetworkMessage}; +use zenoh_result::ZResult; +use zenoh_transport::{unicast::TransportUnicast, TransportPeerEventHandler}; + use super::Primitives; use crate::net::routing::{ dispatcher::face::Face, interceptor::{InterceptorTrait, InterceptorsChain}, RoutingContext, }; -use std::{any::Any, sync::Arc}; -use zenoh_link::Link; -use zenoh_protocol::network::{NetworkBody, NetworkMessage}; -use zenoh_result::ZResult; -use zenoh_transport::unicast::TransportUnicast; -use zenoh_transport::TransportPeerEventHandler; pub struct DeMux { face: Face, diff --git a/zenoh/src/net/primitives/mux.rs b/zenoh/src/net/primitives/mux.rs index 8589fab518..df292b4315 100644 --- a/zenoh/src/net/primitives/mux.rs +++ b/zenoh/src/net/primitives/mux.rs @@ -11,19 +11,21 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{EPrimitives, Primitives}; -use crate::net::routing::{ - dispatcher::face::{Face, WeakFace}, - interceptor::{InterceptorTrait, InterceptorsChain}, - RoutingContext, -}; use std::sync::OnceLock; + use zenoh_protocol::network::{ interest::Interest, Declare, NetworkBody, NetworkMessage, Push, Request, Response, ResponseFinal, }; use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; +use super::{EPrimitives, Primitives}; +use crate::net::routing::{ + dispatcher::face::{Face, WeakFace}, + interceptor::{InterceptorTrait, InterceptorsChain}, + RoutingContext, +}; + pub struct Mux { pub handler: TransportUnicast, pub(crate) face: OnceLock, diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index 06d55de920..c5129f76e2 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -11,21 +11,18 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::super::router::*; -use super::tables::TablesLock; -use super::{resource::*, tables}; -use crate::api::key_expr::KeyExpr; -use crate::net::primitives::{McastMux, Mux, Primitives}; -use crate::net::routing::interceptor::{InterceptorTrait, InterceptorsChain}; -use std::any::Any; -use std::collections::HashMap; -use std::fmt; -use std::sync::{Arc, Weak}; +use std::{ + any::Any, + collections::HashMap, + fmt, + sync::{Arc, 
Weak}, +}; + use tokio_util::sync::CancellationToken; -use zenoh_protocol::zenoh::RequestBody; use zenoh_protocol::{ core::{ExprId, WhatAmI, ZenohId}, network::{Mapping, Push, Request, RequestId, Response, ResponseFinal}, + zenoh::RequestBody, }; use zenoh_sync::get_mut_unchecked; use zenoh_task::TaskController; @@ -33,6 +30,15 @@ use zenoh_transport::multicast::TransportMulticast; #[cfg(feature = "stats")] use zenoh_transport::stats::TransportStats; +use super::{super::router::*, resource::*, tables, tables::TablesLock}; +use crate::{ + api::key_expr::KeyExpr, + net::{ + primitives::{McastMux, Mux, Primitives}, + routing::interceptor::{InterceptorTrait, InterceptorsChain}, + }, +}; + pub struct FaceState { pub(crate) id: usize, pub(crate) zid: ZenohId, diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index fe2274ed64..94c6f7b1a6 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -11,23 +11,26 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::face::FaceState; -use super::resource::{DataRoutes, Direction, Resource}; -use super::tables::{NodeId, Route, RoutingExpr, Tables, TablesLock}; -use crate::net::routing::hat::HatTrait; -use std::collections::HashMap; -use std::sync::Arc; +use std::{collections::HashMap, sync::Arc}; + use zenoh_core::zread; -use zenoh_protocol::core::key_expr::keyexpr; -use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; -use zenoh_protocol::network::declare::SubscriberId; use zenoh_protocol::{ - core::{WhatAmI, WireExpr}, - network::{declare::ext, Push}, + core::{key_expr::keyexpr, WhatAmI, WireExpr}, + network::{ + declare::{ext, subscriber::ext::SubscriberInfo, SubscriberId}, + Push, + }, zenoh::PushBody, }; use zenoh_sync::get_mut_unchecked; +use super::{ + face::FaceState, + resource::{DataRoutes, Direction, Resource}, + tables::{NodeId, Route, RoutingExpr, Tables, TablesLock}, +}; +use crate::net::routing::hat::HatTrait; + pub(crate) fn declare_subscription( hat_code: &(dyn HatTrait + Send + Sync), tables: &TablesLock, diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index cd17f1339f..2bbc924e0b 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -11,16 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::face::FaceState; -use super::resource::{QueryRoute, QueryRoutes, QueryTargetQablSet, Resource}; -use super::tables::NodeId; -use super::tables::{RoutingExpr, Tables, TablesLock}; -use crate::net::routing::hat::HatTrait; -use crate::net::routing::RoutingContext; +use std::{ + collections::HashMap, + sync::{Arc, Weak}, + time::Duration, +}; + use async_trait::async_trait; -use std::collections::HashMap; -use std::sync::{Arc, Weak}; -use std::time::Duration; use tokio_util::sync::CancellationToken; use zenoh_config::WhatAmI; use zenoh_protocol::{ @@ -35,6 +32,13 @@ use zenoh_protocol::{ use zenoh_sync::get_mut_unchecked; use zenoh_util::Timed; +use super::{ + face::FaceState, + resource::{QueryRoute, QueryRoutes, QueryTargetQablSet, Resource}, + tables::{NodeId, RoutingExpr, Tables, TablesLock}, +}; +use crate::net::routing::{hat::HatTrait, RoutingContext}; + pub(crate) struct Query { src_face: Arc, src_qid: RequestId, diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index bc0aecb9bb..d8765e16ae 100644 --- 
a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -11,17 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::face::FaceState; -use super::tables::{Tables, TablesLock}; -use crate::net::routing::dispatcher::face::Face; -use crate::net::routing::RoutingContext; -use std::any::Any; -use std::collections::HashMap; -use std::convert::TryInto; -use std::hash::{Hash, Hasher}; -use std::sync::{Arc, Weak}; +use std::{ + any::Any, + collections::HashMap, + convert::TryInto, + hash::{Hash, Hasher}, + sync::{Arc, Weak}, +}; + use zenoh_config::WhatAmI; -use zenoh_protocol::network::RequestId; use zenoh_protocol::{ core::{key_expr::keyexpr, ExprId, WireExpr}, network::{ @@ -29,11 +27,17 @@ use zenoh_protocol::{ ext, queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, }, - Mapping, + Mapping, RequestId, }, }; use zenoh_sync::get_mut_unchecked; +use super::{ + face::FaceState, + tables::{Tables, TablesLock}, +}; +use crate::net::routing::{dispatcher::face::Face, RoutingContext}; + pub(crate) type NodeId = u16; pub(crate) type Direction = (Arc, WireExpr<'static>, NodeId); diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs index 72cee0b452..2853cc5a9f 100644 --- a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -11,27 +11,30 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::face::FaceState; -pub use super::pubsub::*; -pub use super::queries::*; -pub use super::resource::*; -use crate::net::routing::hat; -use crate::net::routing::hat::HatTrait; -use crate::net::routing::interceptor::interceptor_factories; -use crate::net::routing::interceptor::InterceptorFactory; -use std::any::Any; -use std::collections::HashMap; -use std::sync::{Arc, Weak}; -use std::sync::{Mutex, RwLock}; -use std::time::Duration; +use std::{ + any::Any, + collections::HashMap, + sync::{Arc, Mutex, RwLock, Weak}, + time::Duration, +}; + use uhlc::HLC; -use zenoh_config::unwrap_or_default; -use zenoh_config::Config; -use zenoh_protocol::core::{ExprId, WhatAmI, ZenohId}; -use zenoh_protocol::network::Mapping; +use zenoh_config::{unwrap_or_default, Config}; +use zenoh_protocol::{ + core::{ExprId, WhatAmI, ZenohId}, + network::Mapping, +}; use zenoh_result::ZResult; use zenoh_sync::get_mut_unchecked; +use super::face::FaceState; +pub use super::{pubsub::*, queries::*, resource::*}; +use crate::net::routing::{ + hat, + hat::HatTrait, + interceptor::{interceptor_factories, InterceptorFactory}, +}; + pub(crate) struct RoutingExpr<'a> { pub(crate) prefix: &'a Arc, pub(crate) suffix: &'a str, diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index 6ca0af1e17..3b4e7c7103 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -17,14 +17,21 @@ //! This module is intended for Zenoh's internal use. //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) -use crate::{ - net::routing::{ - dispatcher::face::Face, - router::{compute_data_routes, compute_query_routes, RoutesIndexes}, - }, - net::runtime::Runtime, +use std::{ + any::Any, + collections::HashMap, + sync::{atomic::AtomicU32, Arc}, }; +use zenoh_config::WhatAmI; +use zenoh_protocol::network::{ + declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, + Oam, +}; +use zenoh_result::ZResult; +use zenoh_sync::get_mut_unchecked; +use zenoh_transport::unicast::TransportUnicast; + use self::{ pubsub::{pubsub_new_face, undeclare_client_subscription}, queries::{queries_new_face, undeclare_client_queryable}, @@ -36,19 +43,13 @@ use super::{ }, HatBaseTrait, HatTrait, }; -use std::{ - any::Any, - collections::HashMap, - sync::{atomic::AtomicU32, Arc}, -}; -use zenoh_config::WhatAmI; -use zenoh_protocol::network::declare::{ - queryable::ext::QueryableInfoType, QueryableId, SubscriberId, +use crate::net::{ + routing::{ + dispatcher::face::Face, + router::{compute_data_routes, compute_query_routes, RoutesIndexes}, + }, + runtime::Runtime, }; -use zenoh_protocol::network::Oam; -use zenoh_result::ZResult; -use zenoh_sync::get_mut_unchecked; -use zenoh_transport::unicast::TransportUnicast; mod pubsub; mod queries; diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index dd35cf24c8..3334fbfb14 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -11,30 +11,33 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{face_hat, face_hat_mut, get_routes_entries}; -use super::{HatCode, HatFace}; -use crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::Tables; -use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; -use crate::net::routing::hat::{HatPubSubTrait, Sources}; -use crate::net::routing::router::RoutesIndexes; -use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; -use std::borrow::Cow; -use std::collections::HashMap; -use std::sync::atomic::Ordering; -use std::sync::Arc; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use zenoh_protocol::network::declare::SubscriberId; +use std::{ + borrow::Cow, + collections::HashMap, + sync::{atomic::Ordering, Arc}, +}; + use zenoh_protocol::{ - core::{Reliability, WhatAmI}, + core::{key_expr::OwnedKeyExpr, Reliability, WhatAmI}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, UndeclareSubscriber, + DeclareSubscriber, SubscriberId, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; +use super::{face_hat, face_hat_mut, get_routes_entries, HatCode, HatFace}; +use crate::net::routing::{ + dispatcher::{ + face::FaceState, + resource::{NodeId, Resource, SessionContext}, + tables::{Route, RoutingExpr, Tables}, + }, + hat::{HatPubSubTrait, Sources}, + router::RoutesIndexes, + RoutingContext, PREFIX_LIVELINESS, +}; + #[inline] fn propagate_simple_subscription_to( _tables: &mut Tables, diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 777198ed95..c915d788a9 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -11,33 +11,41 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{face_hat, face_hat_mut, get_routes_entries}; -use 
super::{HatCode, HatFace}; -use crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::Tables; -use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; -use crate::net::routing::hat::{HatQueriesTrait, Sources}; -use crate::net::routing::router::RoutesIndexes; -use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; +use std::{ + borrow::Cow, + collections::HashMap, + sync::{atomic::Ordering, Arc}, +}; + use ordered_float::OrderedFloat; -use std::borrow::Cow; -use std::collections::HashMap; -use std::sync::atomic::Ordering; -use std::sync::Arc; use zenoh_buffers::ZBuf; -use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ - core::{WhatAmI, WireExpr}, + core::{ + key_expr::{ + include::{Includer, DEFAULT_INCLUDER}, + OwnedKeyExpr, + }, + WhatAmI, WireExpr, + }, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareQueryable, UndeclareQueryable, + DeclareQueryable, QueryableId, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; +use super::{face_hat, face_hat_mut, get_routes_entries, HatCode, HatFace}; +use crate::net::routing::{ + dispatcher::{ + face::FaceState, + resource::{NodeId, Resource, SessionContext}, + tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr, Tables}, + }, + hat::{HatQueriesTrait, Sources}, + router::RoutesIndexes, + RoutingContext, PREFIX_LIVELINESS, +}; + #[inline] fn merge_qabl_infos(mut this: QueryableInfoType, info: &QueryableInfoType) -> QueryableInfoType { this.complete = this.complete || info.complete; diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index beb2d6ef68..e76f53a0dd 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -17,36 +17,13 @@ //! This module is intended for Zenoh's internal use. //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) -use self::{ - network::Network, - pubsub::{pubsub_new_face, pubsub_remove_node, undeclare_client_subscription}, - queries::{queries_new_face, queries_remove_node, undeclare_client_queryable}, -}; -use super::{ - super::dispatcher::{ - face::FaceState, - tables::{NodeId, Resource, RoutingExpr, Tables, TablesLock}, - }, - HatBaseTrait, HatTrait, -}; -use crate::{ - net::runtime::Runtime, - net::{ - codec::Zenoh080Routing, - protocol::linkstate::LinkStateList, - routing::{ - dispatcher::face::Face, - hat::TREES_COMPUTATION_DELAY_MS, - router::{compute_data_routes, compute_query_routes, RoutesIndexes}, - }, - }, -}; use std::{ any::Any, collections::{HashMap, HashSet}, sync::{atomic::AtomicU32, Arc}, time::Duration, }; + use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, ZenohId}; use zenoh_protocol::{ common::ZExtBody, @@ -61,6 +38,29 @@ use zenoh_sync::get_mut_unchecked; use zenoh_task::TerminatableTask; use zenoh_transport::unicast::TransportUnicast; +use self::{ + network::Network, + pubsub::{pubsub_new_face, pubsub_remove_node, undeclare_client_subscription}, + queries::{queries_new_face, queries_remove_node, undeclare_client_queryable}, +}; +use super::{ + super::dispatcher::{ + face::FaceState, + tables::{NodeId, Resource, RoutingExpr, Tables, TablesLock}, + }, + HatBaseTrait, HatTrait, +}; +use crate::net::{ + codec::Zenoh080Routing, + protocol::linkstate::LinkStateList, + routing::{ + dispatcher::face::Face, + hat::TREES_COMPUTATION_DELAY_MS, + router::{compute_data_routes, compute_query_routes, RoutesIndexes}, + }, + runtime::Runtime, +}; + mod network; mod pubsub; mod queries; diff --git a/zenoh/src/net/routing/hat/linkstate_peer/network.rs b/zenoh/src/net/routing/hat/linkstate_peer/network.rs index 9c8e0c8860..2a26b1f583 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/network.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/network.rs @@ -11,26 +11,34 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::net::codec::Zenoh080Routing; -use crate::net::protocol::linkstate::{LinkState, LinkStateList}; -use crate::net::routing::dispatcher::tables::NodeId; -use crate::net::runtime::Runtime; -use crate::net::runtime::WeakRuntime; -use petgraph::graph::NodeIndex; -use petgraph::visit::{VisitMap, Visitable}; -use rand::Rng; use std::convert::TryInto; + +use petgraph::{ + graph::NodeIndex, + visit::{VisitMap, Visitable}, +}; +use rand::Rng; use vec_map::VecMap; -use zenoh_buffers::writer::{DidntWrite, HasWriter}; -use zenoh_buffers::ZBuf; +use zenoh_buffers::{ + writer::{DidntWrite, HasWriter}, + ZBuf, +}; use zenoh_codec::WCodec; use zenoh_link::Locator; -use zenoh_protocol::common::ZExtBody; -use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher, ZenohId}; -use zenoh_protocol::network::oam::id::OAM_LINKSTATE; -use zenoh_protocol::network::{oam, NetworkBody, NetworkMessage, Oam}; +use zenoh_protocol::{ + common::ZExtBody, + core::{WhatAmI, WhatAmIMatcher, ZenohId}, + network::{oam, oam::id::OAM_LINKSTATE, NetworkBody, NetworkMessage, Oam}, +}; use zenoh_transport::unicast::TransportUnicast; +use crate::net::{ + codec::Zenoh080Routing, + protocol::linkstate::{LinkState, LinkStateList}, + routing::dispatcher::tables::NodeId, + runtime::{Runtime, WeakRuntime}, +}; + #[derive(Clone)] struct Details { zid: bool, diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index 2c1cbb23e7..e5f7da81f7 100644 --- 
a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -11,33 +11,38 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::network::Network; -use super::{face_hat, face_hat_mut, get_routes_entries, hat, hat_mut, res_hat, res_hat_mut}; -use super::{get_peer, HatCode, HatContext, HatFace, HatTables}; -use crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::pubsub::*; -use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::Tables; -use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; -use crate::net::routing::hat::{HatPubSubTrait, Sources}; -use crate::net::routing::router::RoutesIndexes; -use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; +use std::{ + borrow::Cow, + collections::{HashMap, HashSet}, + sync::{atomic::Ordering, Arc}, +}; + use petgraph::graph::NodeIndex; -use std::borrow::Cow; -use std::collections::{HashMap, HashSet}; -use std::sync::atomic::Ordering; -use std::sync::Arc; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use zenoh_protocol::network::declare::SubscriberId; use zenoh_protocol::{ - core::{Reliability, WhatAmI, ZenohId}, + core::{key_expr::OwnedKeyExpr, Reliability, WhatAmI, ZenohId}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, UndeclareSubscriber, + DeclareSubscriber, SubscriberId, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; +use super::{ + face_hat, face_hat_mut, get_peer, get_routes_entries, hat, hat_mut, network::Network, res_hat, + res_hat_mut, HatCode, HatContext, HatFace, HatTables, +}; +use crate::net::routing::{ + dispatcher::{ + face::FaceState, + pubsub::*, + resource::{NodeId, Resource, SessionContext}, + tables::{Route, RoutingExpr, Tables}, + }, + hat::{HatPubSubTrait, Sources}, + router::RoutesIndexes, + RoutingContext, PREFIX_LIVELINESS, +}; + #[inline] fn send_sourced_subscription_to_net_childs( tables: &Tables, diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index a227d845ba..bed683f717 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -11,36 +11,46 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::network::Network; -use super::{face_hat, face_hat_mut, get_routes_entries, hat, hat_mut, res_hat, res_hat_mut}; -use super::{get_peer, HatCode, HatContext, HatFace, HatTables}; -use crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::queries::*; -use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::Tables; -use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; -use crate::net::routing::hat::{HatQueriesTrait, Sources}; -use crate::net::routing::router::RoutesIndexes; -use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; +use std::{ + borrow::Cow, + collections::HashMap, + sync::{atomic::Ordering, Arc}, +}; + use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; -use std::borrow::Cow; -use std::collections::HashMap; -use std::sync::atomic::Ordering; -use std::sync::Arc; use zenoh_buffers::ZBuf; -use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use 
zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ - core::{WhatAmI, WireExpr, ZenohId}, + core::{ + key_expr::{ + include::{Includer, DEFAULT_INCLUDER}, + OwnedKeyExpr, + }, + WhatAmI, WireExpr, ZenohId, + }, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareQueryable, UndeclareQueryable, + DeclareQueryable, QueryableId, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; +use super::{ + face_hat, face_hat_mut, get_peer, get_routes_entries, hat, hat_mut, network::Network, res_hat, + res_hat_mut, HatCode, HatContext, HatFace, HatTables, +}; +use crate::net::routing::{ + dispatcher::{ + face::FaceState, + queries::*, + resource::{NodeId, Resource, SessionContext}, + tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr, Tables}, + }, + hat::{HatQueriesTrait, Sources}, + router::RoutesIndexes, + RoutingContext, PREFIX_LIVELINESS, +}; + #[inline] fn merge_qabl_infos(mut this: QueryableInfoType, info: &QueryableInfoType) -> QueryableInfoType { this.complete = this.complete || info.complete; diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index ee6557aac3..5eb812df71 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -17,15 +17,8 @@ //! This module is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -use super::{ - dispatcher::{ - face::{Face, FaceState}, - tables::{NodeId, QueryTargetQablSet, Resource, Route, RoutingExpr, Tables, TablesLock}, - }, - router::RoutesIndexes, -}; -use crate::net::runtime::Runtime; use std::{any::Any, sync::Arc}; + use zenoh_buffers::ZBuf; use zenoh_config::{unwrap_or_default, Config, WhatAmI, ZenohId}; use zenoh_protocol::{ @@ -41,6 +34,15 @@ use zenoh_protocol::{ use zenoh_result::ZResult; use zenoh_transport::unicast::TransportUnicast; +use super::{ + dispatcher::{ + face::{Face, FaceState}, + tables::{NodeId, QueryTargetQablSet, Resource, Route, RoutingExpr, Tables, TablesLock}, + }, + router::RoutesIndexes, +}; +use crate::net::runtime::Runtime; + mod client; mod linkstate_peer; mod p2p_peer; diff --git a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs index df04b396ab..57b76fc086 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs @@ -11,24 +11,30 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::net::codec::Zenoh080Routing; -use crate::net::protocol::linkstate::{LinkState, LinkStateList}; -use crate::net::runtime::Runtime; -use crate::net::runtime::WeakRuntime; +use std::convert::TryInto; + use petgraph::graph::NodeIndex; use rand::Rng; -use std::convert::TryInto; use vec_map::VecMap; -use zenoh_buffers::writer::{DidntWrite, HasWriter}; -use zenoh_buffers::ZBuf; +use zenoh_buffers::{ + writer::{DidntWrite, HasWriter}, + ZBuf, +}; use zenoh_codec::WCodec; use zenoh_link::Locator; -use zenoh_protocol::common::ZExtBody; -use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher, ZenohId}; -use zenoh_protocol::network::oam::id::OAM_LINKSTATE; -use zenoh_protocol::network::{oam, NetworkBody, NetworkMessage, Oam}; +use zenoh_protocol::{ + common::ZExtBody, + core::{WhatAmI, WhatAmIMatcher, ZenohId}, + network::{oam, oam::id::OAM_LINKSTATE, NetworkBody, NetworkMessage, Oam}, +}; use zenoh_transport::unicast::TransportUnicast; +use crate::net::{ + codec::Zenoh080Routing, + protocol::linkstate::{LinkState, LinkStateList}, + runtime::{Runtime, 
WeakRuntime}, +}; + #[derive(Clone)] struct Details { zid: bool, diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index ba41e0f114..530c181335 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -17,17 +17,24 @@ //! This module is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -use crate::{ - net::runtime::Runtime, - net::{ - codec::Zenoh080Routing, - protocol::linkstate::LinkStateList, - routing::{ - dispatcher::face::Face, - router::{compute_data_routes, compute_query_routes, RoutesIndexes}, - }, +use std::{ + any::Any, + collections::HashMap, + sync::{atomic::AtomicU32, Arc}, +}; + +use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher}; +use zenoh_protocol::{ + common::ZExtBody, + network::{ + declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, + oam::id::OAM_LINKSTATE, + Oam, }, }; +use zenoh_result::ZResult; +use zenoh_sync::get_mut_unchecked; +use zenoh_transport::unicast::TransportUnicast; use self::{ gossip::Network, @@ -41,23 +48,15 @@ use super::{ }, HatBaseTrait, HatTrait, }; -use std::{ - any::Any, - collections::HashMap, - sync::{atomic::AtomicU32, Arc}, -}; -use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher}; -use zenoh_protocol::network::{ - declare::{QueryableId, SubscriberId}, - Oam, -}; -use zenoh_protocol::{ - common::ZExtBody, - network::{declare::queryable::ext::QueryableInfoType, oam::id::OAM_LINKSTATE}, +use crate::net::{ + codec::Zenoh080Routing, + protocol::linkstate::LinkStateList, + routing::{ + dispatcher::face::Face, + router::{compute_data_routes, compute_query_routes, RoutesIndexes}, + }, + runtime::Runtime, }; -use zenoh_result::ZResult; -use zenoh_sync::get_mut_unchecked; -use zenoh_transport::unicast::TransportUnicast; mod gossip; mod pubsub; diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index d57c2ac665..e7cf0c5e5d 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -11,30 +11,33 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{face_hat, face_hat_mut, get_routes_entries}; -use super::{HatCode, HatFace}; -use crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::Tables; -use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; -use crate::net::routing::hat::{HatPubSubTrait, Sources}; -use crate::net::routing::router::RoutesIndexes; -use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; -use std::borrow::Cow; -use std::collections::HashMap; -use std::sync::atomic::Ordering; -use std::sync::Arc; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use zenoh_protocol::network::declare::SubscriberId; +use std::{ + borrow::Cow, + collections::HashMap, + sync::{atomic::Ordering, Arc}, +}; + use zenoh_protocol::{ - core::{Reliability, WhatAmI}, + core::{key_expr::OwnedKeyExpr, Reliability, WhatAmI}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, UndeclareSubscriber, + DeclareSubscriber, SubscriberId, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; +use super::{face_hat, face_hat_mut, get_routes_entries, HatCode, HatFace}; +use crate::net::routing::{ + dispatcher::{ + 
face::FaceState, + resource::{NodeId, Resource, SessionContext}, + tables::{Route, RoutingExpr, Tables}, + }, + hat::{HatPubSubTrait, Sources}, + router::RoutesIndexes, + RoutingContext, PREFIX_LIVELINESS, +}; + #[inline] fn propagate_simple_subscription_to( _tables: &mut Tables, diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index 25fed11842..f0de12d7b9 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -11,33 +11,41 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{face_hat, face_hat_mut, get_routes_entries}; -use super::{HatCode, HatFace}; -use crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::Tables; -use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; -use crate::net::routing::hat::{HatQueriesTrait, Sources}; -use crate::net::routing::router::RoutesIndexes; -use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; +use std::{ + borrow::Cow, + collections::HashMap, + sync::{atomic::Ordering, Arc}, +}; + use ordered_float::OrderedFloat; -use std::borrow::Cow; -use std::collections::HashMap; -use std::sync::atomic::Ordering; -use std::sync::Arc; use zenoh_buffers::ZBuf; -use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ - core::{WhatAmI, WireExpr}, + core::{ + key_expr::{ + include::{Includer, DEFAULT_INCLUDER}, + OwnedKeyExpr, + }, + WhatAmI, WireExpr, + }, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareQueryable, UndeclareQueryable, + DeclareQueryable, QueryableId, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; +use super::{face_hat, face_hat_mut, get_routes_entries, HatCode, HatFace}; +use crate::net::routing::{ + dispatcher::{ + face::FaceState, + resource::{NodeId, Resource, SessionContext}, + tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr, Tables}, + }, + hat::{HatQueriesTrait, Sources}, + router::RoutesIndexes, + RoutingContext, PREFIX_LIVELINESS, +}; + #[inline] fn merge_qabl_infos(mut this: QueryableInfoType, info: &QueryableInfoType) -> QueryableInfoType { this.complete = this.complete || info.complete; diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 85ce0e6916..f573acee43 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -17,34 +17,6 @@ //! This module is intended for Zenoh's internal use. //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) -use self::{ - network::{shared_nodes, Network}, - pubsub::{ - pubsub_linkstate_change, pubsub_new_face, pubsub_remove_node, undeclare_client_subscription, - }, - queries::{ - queries_linkstate_change, queries_new_face, queries_remove_node, undeclare_client_queryable, - }, -}; -use super::{ - super::dispatcher::{ - face::FaceState, - tables::{NodeId, Resource, RoutingExpr, Tables, TablesLock}, - }, - HatBaseTrait, HatTrait, -}; -use crate::{ - net::runtime::Runtime, - net::{ - codec::Zenoh080Routing, - protocol::linkstate::LinkStateList, - routing::{ - dispatcher::face::Face, - hat::TREES_COMPUTATION_DELAY_MS, - router::{compute_data_routes, compute_query_routes, RoutesIndexes}, - }, - }, -}; use std::{ any::Any, collections::{hash_map::DefaultHasher, HashMap, HashSet}, @@ -52,6 +24,7 @@ use std::{ sync::{atomic::AtomicU32, Arc}, time::Duration, }; + use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, ZenohId}; use zenoh_protocol::{ common::ZExtBody, @@ -66,6 +39,33 @@ use zenoh_sync::get_mut_unchecked; use zenoh_task::TerminatableTask; use zenoh_transport::unicast::TransportUnicast; +use self::{ + network::{shared_nodes, Network}, + pubsub::{ + pubsub_linkstate_change, pubsub_new_face, pubsub_remove_node, undeclare_client_subscription, + }, + queries::{ + queries_linkstate_change, queries_new_face, queries_remove_node, undeclare_client_queryable, + }, +}; +use super::{ + super::dispatcher::{ + face::FaceState, + tables::{NodeId, Resource, RoutingExpr, Tables, TablesLock}, + }, + HatBaseTrait, HatTrait, +}; +use crate::net::{ + codec::Zenoh080Routing, + protocol::linkstate::LinkStateList, + routing::{ + dispatcher::face::Face, + hat::TREES_COMPUTATION_DELAY_MS, + router::{compute_data_routes, compute_query_routes, RoutesIndexes}, + }, + runtime::Runtime, +}; + mod network; mod pubsub; mod queries; diff --git a/zenoh/src/net/routing/hat/router/network.rs b/zenoh/src/net/routing/hat/router/network.rs index 3ff59b5ede..ae435a6871 100644 --- a/zenoh/src/net/routing/hat/router/network.rs +++ b/zenoh/src/net/routing/hat/router/network.rs @@ -11,25 +11,34 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::net::codec::Zenoh080Routing; -use crate::net::protocol::linkstate::{LinkState, LinkStateList}; -use crate::net::routing::dispatcher::tables::NodeId; -use crate::net::runtime::Runtime; -use petgraph::graph::NodeIndex; -use petgraph::visit::{IntoNodeReferences, VisitMap, Visitable}; -use rand::Rng; use std::convert::TryInto; + +use petgraph::{ + graph::NodeIndex, + visit::{IntoNodeReferences, VisitMap, Visitable}, +}; +use rand::Rng; use vec_map::VecMap; -use zenoh_buffers::writer::{DidntWrite, HasWriter}; -use zenoh_buffers::ZBuf; +use zenoh_buffers::{ + writer::{DidntWrite, HasWriter}, + ZBuf, +}; use zenoh_codec::WCodec; use zenoh_link::Locator; -use zenoh_protocol::common::ZExtBody; -use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher, ZenohId}; -use zenoh_protocol::network::oam::id::OAM_LINKSTATE; -use zenoh_protocol::network::{oam, NetworkBody, NetworkMessage, Oam}; +use zenoh_protocol::{ + common::ZExtBody, + core::{WhatAmI, WhatAmIMatcher, ZenohId}, + network::{oam, oam::id::OAM_LINKSTATE, NetworkBody, NetworkMessage, Oam}, +}; use zenoh_transport::unicast::TransportUnicast; +use crate::net::{ + codec::Zenoh080Routing, + protocol::linkstate::{LinkState, LinkStateList}, + routing::dispatcher::tables::NodeId, + runtime::Runtime, +}; + #[derive(Clone)] struct Details { zid: bool, diff --git 
a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index 99b7eb3c12..14726ac970 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -11,33 +11,38 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::network::Network; -use super::{face_hat, face_hat_mut, get_routes_entries, hat, hat_mut, res_hat, res_hat_mut}; -use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; -use crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::pubsub::*; -use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::Tables; -use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; -use crate::net::routing::hat::{HatPubSubTrait, Sources}; -use crate::net::routing::router::RoutesIndexes; -use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; +use std::{ + borrow::Cow, + collections::{HashMap, HashSet}, + sync::{atomic::Ordering, Arc}, +}; + use petgraph::graph::NodeIndex; -use std::borrow::Cow; -use std::collections::{HashMap, HashSet}; -use std::sync::atomic::Ordering; -use std::sync::Arc; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use zenoh_protocol::network::declare::SubscriberId; use zenoh_protocol::{ - core::{Reliability, WhatAmI, ZenohId}, + core::{key_expr::OwnedKeyExpr, Reliability, WhatAmI, ZenohId}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, UndeclareSubscriber, + DeclareSubscriber, SubscriberId, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; +use super::{ + face_hat, face_hat_mut, get_peer, get_router, get_routes_entries, hat, hat_mut, + network::Network, res_hat, res_hat_mut, HatCode, HatContext, HatFace, HatTables, +}; +use crate::net::routing::{ + dispatcher::{ + face::FaceState, + pubsub::*, + resource::{NodeId, Resource, SessionContext}, + tables::{Route, RoutingExpr, Tables}, + }, + hat::{HatPubSubTrait, Sources}, + router::RoutesIndexes, + RoutingContext, PREFIX_LIVELINESS, +}; + #[inline] fn send_sourced_subscription_to_net_childs( tables: &Tables, diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index dbd7da8629..9defb80081 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -11,36 +11,46 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::network::Network; -use super::{face_hat, face_hat_mut, get_routes_entries, hat, hat_mut, res_hat, res_hat_mut}; -use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; -use crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::queries::*; -use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::Tables; -use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; -use crate::net::routing::hat::{HatQueriesTrait, Sources}; -use crate::net::routing::router::RoutesIndexes; -use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; +use std::{ + borrow::Cow, + collections::HashMap, + sync::{atomic::Ordering, Arc}, +}; + use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; -use std::borrow::Cow; -use std::collections::HashMap; -use std::sync::atomic::Ordering; -use std::sync::Arc; use zenoh_buffers::ZBuf; -use 
zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ - core::{WhatAmI, WireExpr, ZenohId}, + core::{ + key_expr::{ + include::{Includer, DEFAULT_INCLUDER}, + OwnedKeyExpr, + }, + WhatAmI, WireExpr, ZenohId, + }, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareQueryable, UndeclareQueryable, + DeclareQueryable, QueryableId, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; +use super::{ + face_hat, face_hat_mut, get_peer, get_router, get_routes_entries, hat, hat_mut, + network::Network, res_hat, res_hat_mut, HatCode, HatContext, HatFace, HatTables, +}; +use crate::net::routing::{ + dispatcher::{ + face::FaceState, + queries::*, + resource::{NodeId, Resource, SessionContext}, + tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr, Tables}, + }, + hat::{HatQueriesTrait, Sources}, + router::RoutesIndexes, + RoutingContext, PREFIX_LIVELINESS, +}; + #[inline] fn merge_qabl_infos(mut this: QueryableInfoType, info: &QueryableInfoType) -> QueryableInfoType { this.complete = this.complete || info.complete; diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index 102e30a0df..5f579bf409 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -18,14 +18,8 @@ //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -use super::{ - authorization::PolicyEnforcer, EgressInterceptor, IngressInterceptor, InterceptorFactory, - InterceptorFactoryTrait, InterceptorTrait, -}; -use crate::api::key_expr::KeyExpr; -use crate::net::routing::RoutingContext; -use std::any::Any; -use std::sync::Arc; +use std::{any::Any, sync::Arc}; + use zenoh_config::{AclConfig, Action, InterceptorFlow, Permission, Subject, ZenohId}; use zenoh_protocol::{ network::{Declare, DeclareBody, NetworkBody, NetworkMessage, Push, Request}, @@ -33,6 +27,12 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; + +use super::{ + authorization::PolicyEnforcer, EgressInterceptor, IngressInterceptor, InterceptorFactory, + InterceptorFactoryTrait, InterceptorTrait, +}; +use crate::{api::key_expr::KeyExpr, net::routing::RoutingContext}; pub struct AclEnforcer { enforcer: Arc, } diff --git a/zenoh/src/net/routing/interceptor/authorization.rs b/zenoh/src/net/routing/interceptor/authorization.rs index 61c1cba217..f1cdb1ca4e 100644 --- a/zenoh/src/net/routing/interceptor/authorization.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -17,13 +17,16 @@ //! This module is intended for Zenoh's internal use. //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) -use ahash::RandomState; use std::collections::HashMap; + +use ahash::RandomState; use zenoh_config::{ AclConfig, AclConfigRules, Action, InterceptorFlow, Permission, PolicyRule, Subject, }; -use zenoh_keyexpr::keyexpr; -use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut, KeBoxTree}; +use zenoh_keyexpr::{ + keyexpr, + keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut, KeBoxTree}, +}; use zenoh_result::ZResult; type PolicyForSubject = FlowPolicy; diff --git a/zenoh/src/net/routing/interceptor/downsampling.rs b/zenoh/src/net/routing/interceptor/downsampling.rs index cda132e806..06e86ec3ce 100644 --- a/zenoh/src/net/routing/interceptor/downsampling.rs +++ b/zenoh/src/net/routing/interceptor/downsampling.rs @@ -18,17 +18,21 @@ //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -use crate::net::routing::interceptor::*; -use std::collections::HashMap; -use std::sync::{Arc, Mutex}; +use std::{ + collections::HashMap, + sync::{Arc, Mutex}, +}; + use zenoh_config::{DownsamplingItemConf, DownsamplingRuleConf, InterceptorFlow}; use zenoh_core::zlock; -use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; -use zenoh_keyexpr::keyexpr_tree::{support::UnknownWildness, KeBoxTree}; -use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut}; +use zenoh_keyexpr::keyexpr_tree::{ + impls::KeyedSetProvider, support::UnknownWildness, IKeyExprTree, IKeyExprTreeMut, KeBoxTree, +}; use zenoh_protocol::network::NetworkBody; use zenoh_result::ZResult; +use crate::net::routing::interceptor::*; + pub(crate) fn downsampling_interceptor_factories( config: &Vec, ) -> ZResult> { diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index 6d9391ce15..3be30e9205 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -22,8 +22,6 @@ mod access_control; use access_control::acl_interceptor_factories; mod authorization; -use super::RoutingContext; -use crate::api::key_expr::KeyExpr; use std::any::Any; use zenoh_config::Config; @@ -31,6 +29,9 @@ use zenoh_protocol::network::NetworkMessage; use zenoh_result::ZResult; use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; +use super::RoutingContext; +use crate::api::key_expr::KeyExpr; + pub mod downsampling; use crate::net::routing::interceptor::downsampling::downsampling_interceptor_factories; diff --git a/zenoh/src/net/routing/mod.rs b/zenoh/src/net/routing/mod.rs index 75b4d4ef6a..9601465326 100644 --- a/zenoh/src/net/routing/mod.rs +++ b/zenoh/src/net/routing/mod.rs @@ -24,11 +24,12 @@ pub mod router; use std::{cell::OnceCell, sync::Arc}; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use zenoh_protocol::{core::WireExpr, network::NetworkMessage}; +use zenoh_protocol::{ + core::{key_expr::OwnedKeyExpr, WireExpr}, + network::NetworkMessage, +}; use self::{dispatcher::face::Face, router::Resource}; - use super::runtime; pub(crate) static PREFIX_LIVELINESS: &str = "@/liveliness"; @@ -100,8 +101,7 @@ impl RoutingContext { impl RoutingContext { #[inline] pub(crate) fn wire_expr(&self) -> Option<&WireExpr> { - use zenoh_protocol::network::DeclareBody; - use zenoh_protocol::network::NetworkBody; + use zenoh_protocol::network::{DeclareBody, NetworkBody}; match &self.msg.body { NetworkBody::Push(m) => Some(&m.wire_expr), NetworkBody::Request(m) => Some(&m.wire_expr), diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index 87766f021b..630253e1c6 
100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -11,33 +11,32 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::dispatcher::face::{Face, FaceState}; -pub use super::dispatcher::pubsub::*; -pub use super::dispatcher::queries::*; -pub use super::dispatcher::resource::*; -use super::dispatcher::tables::Tables; -use super::dispatcher::tables::TablesLock; -use super::hat; -use super::interceptor::EgressInterceptor; -use super::interceptor::InterceptorsChain; -use super::runtime::Runtime; -use crate::net::primitives::DeMux; -use crate::net::primitives::DummyPrimitives; -use crate::net::primitives::EPrimitives; -use crate::net::primitives::McastMux; -use crate::net::primitives::Mux; -use crate::net::routing::interceptor::IngressInterceptor; -use std::str::FromStr; -use std::sync::Arc; -use std::sync::{Mutex, RwLock}; +use std::{ + str::FromStr, + sync::{Arc, Mutex, RwLock}, +}; + use uhlc::HLC; use zenoh_config::Config; use zenoh_protocol::core::{WhatAmI, ZenohId}; -use zenoh_transport::multicast::TransportMulticast; -use zenoh_transport::unicast::TransportUnicast; -use zenoh_transport::TransportPeer; // use zenoh_collections::Timer; use zenoh_result::ZResult; +use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast, TransportPeer}; + +pub use super::dispatcher::{pubsub::*, queries::*, resource::*}; +use super::{ + dispatcher::{ + face::{Face, FaceState}, + tables::{Tables, TablesLock}, + }, + hat, + interceptor::{EgressInterceptor, InterceptorsChain}, + runtime::Runtime, +}; +use crate::net::{ + primitives::{DeMux, DummyPrimitives, EPrimitives, McastMux, Mux}, + routing::interceptor::IngressInterceptor, +}; pub struct Router { // whatami: WhatAmI, diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 3f2e0b488f..8b53692ead 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -10,24 +10,13 @@ // // Contributors: // ZettaScale Zenoh Team, -use super::routing::dispatcher::face::Face; -use super::Runtime; -use crate::api::builders::sample::ValueBuilderTrait; -use crate::api::bytes::ZBytes; -use crate::api::key_expr::KeyExpr; -#[cfg(all(feature = "unstable", feature = "plugins"))] -use crate::api::plugins::PluginsManager; -use crate::api::queryable::Query; -use crate::api::queryable::QueryInner; -use crate::api::value::Value; -use crate::encoding::Encoding; -use crate::net::primitives::Primitives; +use std::{ + collections::HashMap, + convert::{TryFrom, TryInto}, + sync::{Arc, Mutex}, +}; + use serde_json::json; -use std::collections::HashMap; -use std::convert::TryFrom; -use std::convert::TryInto; -use std::sync::Arc; -use std::sync::Mutex; use tracing::{error, trace}; use zenoh_buffers::buffer::SplitBuffer; use zenoh_config::{unwrap_or_default, ConfigValidator, ValidatedMap, WhatAmI}; @@ -36,20 +25,35 @@ use zenoh_core::Wait; use zenoh_plugin_trait::{PluginControl, PluginStatus}; #[cfg(all(feature = "unstable", feature = "plugins"))] use zenoh_protocol::core::key_expr::keyexpr; -use zenoh_protocol::network::declare::QueryableId; -use zenoh_protocol::network::Interest; use zenoh_protocol::{ core::{key_expr::OwnedKeyExpr, ExprId, WireExpr, ZenohId, EMPTY_EXPR_ID}, network::{ - declare::{queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo}, - ext, Declare, DeclareBody, DeclareQueryable, DeclareSubscriber, Push, Request, Response, - ResponseFinal, + declare::{ + queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, 
QueryableId, + }, + ext, Declare, DeclareBody, DeclareQueryable, DeclareSubscriber, Interest, Push, Request, + Response, ResponseFinal, }, zenoh::{PushBody, RequestBody}, }; use zenoh_result::ZResult; use zenoh_transport::unicast::TransportUnicast; +use super::{routing::dispatcher::face::Face, Runtime}; +#[cfg(all(feature = "unstable", feature = "plugins"))] +use crate::api::plugins::PluginsManager; +use crate::{ + api::{ + builders::sample::ValueBuilderTrait, + bytes::ZBytes, + key_expr::KeyExpr, + queryable::{Query, QueryInner}, + value::Value, + }, + encoding::Encoding, + net::primitives::Primitives, +}; + pub struct AdminContext { runtime: Runtime, version: String, diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index f1cf4d95d2..f4eb0289ca 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -20,31 +20,28 @@ mod adminspace; pub mod orchestrator; -use super::primitives::DeMux; -use super::routing; -use super::routing::router::Router; -#[cfg(all(feature = "unstable", feature = "plugins"))] -use crate::api::loader::{load_plugins, start_plugins}; -#[cfg(all(feature = "unstable", feature = "plugins"))] -use crate::api::plugins::PluginsManager; -use crate::config::{unwrap_or_default, Config, ModeDependent, Notifier}; -use crate::{GIT_VERSION, LONG_VERSION}; -pub use adminspace::AdminSpace; -use futures::stream::StreamExt; -use futures::Future; -use std::any::Any; -use std::sync::atomic::{AtomicU32, Ordering}; -use std::sync::{Arc, Weak}; #[cfg(all(feature = "unstable", feature = "plugins"))] use std::sync::{Mutex, MutexGuard}; -use std::time::Duration; +use std::{ + any::Any, + sync::{ + atomic::{AtomicU32, Ordering}, + Arc, Weak, + }, + time::Duration, +}; + +pub use adminspace::AdminSpace; +use futures::{stream::StreamExt, Future}; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use uhlc::{HLCBuilder, HLC}; use zenoh_link::{EndPoint, Link}; use zenoh_plugin_trait::{PluginStartArgs, StructVersion}; -use zenoh_protocol::core::{Locator, WhatAmI, ZenohId}; -use zenoh_protocol::network::NetworkMessage; +use zenoh_protocol::{ + core::{Locator, WhatAmI, ZenohId}, + network::NetworkMessage, +}; use zenoh_result::{bail, ZResult}; #[cfg(all(feature = "unstable", feature = "shared-memory"))] use zenoh_shm::api::client_storage::SharedMemoryClientStorage; @@ -57,6 +54,16 @@ use zenoh_transport::{ TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; +use super::{primitives::DeMux, routing, routing::router::Router}; +#[cfg(all(feature = "unstable", feature = "plugins"))] +use crate::api::loader::{load_plugins, start_plugins}; +#[cfg(all(feature = "unstable", feature = "plugins"))] +use crate::api::plugins::PluginsManager; +use crate::{ + config::{unwrap_or_default, Config, ModeDependent, Notifier}, + GIT_VERSION, LONG_VERSION, +}; + pub(crate) struct RuntimeState { zid: ZenohId, whatami: WhatAmI, diff --git a/zenoh/src/net/runtime/orchestrator.rs b/zenoh/src/net/runtime/orchestrator.rs index c2c7ecedd2..610f189b58 100644 --- a/zenoh/src/net/runtime/orchestrator.rs +++ b/zenoh/src/net/runtime/orchestrator.rs @@ -11,14 +11,18 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{Runtime, RuntimeSession}; +use std::{ + net::{IpAddr, Ipv6Addr, SocketAddr}, + time::Duration, +}; + use futures::prelude::*; use socket2::{Domain, Socket, Type}; -use std::net::{IpAddr, Ipv6Addr, SocketAddr}; -use std::time::Duration; use tokio::net::UdpSocket; -use zenoh_buffers::reader::DidntRead; -use 
zenoh_buffers::{reader::HasReader, writer::HasWriter}; +use zenoh_buffers::{ + reader::{DidntRead, HasReader}, + writer::HasWriter, +}; use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_config::{ get_global_connect_timeout, get_global_listener_timeout, unwrap_or_default, ModeDependent, @@ -30,6 +34,8 @@ use zenoh_protocol::{ }; use zenoh_result::{bail, zerror, ZResult}; +use super::{Runtime, RuntimeSession}; + const RCV_BUF_SIZE: usize = u16::MAX as usize; const SCOUT_INITIAL_PERIOD: Duration = Duration::from_millis(1_000); const SCOUT_MAX_PERIOD: Duration = Duration::from_millis(8_000); diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index 841bc209f6..5f04b73d53 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -11,23 +11,33 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::net::primitives::{DummyPrimitives, EPrimitives, Primitives}; -use crate::net::routing::dispatcher::tables::{self, Tables}; -use crate::net::routing::router::*; -use crate::net::routing::RoutingContext; -use std::convert::{TryFrom, TryInto}; -use std::sync::Arc; +use std::{ + convert::{TryFrom, TryInto}, + sync::Arc, +}; + use uhlc::HLC; use zenoh_buffers::ZBuf; use zenoh_config::Config; use zenoh_core::zlock; -use zenoh_protocol::core::Encoding; -use zenoh_protocol::core::{ - key_expr::keyexpr, ExprId, Reliability, WhatAmI, WireExpr, ZenohId, EMPTY_EXPR_ID, +use zenoh_protocol::{ + core::{ + key_expr::keyexpr, Encoding, ExprId, Reliability, WhatAmI, WireExpr, ZenohId, EMPTY_EXPR_ID, + }, + network::{ + declare::subscriber::ext::SubscriberInfo, ext, Declare, DeclareBody, DeclareKeyExpr, + }, + zenoh::{PushBody, Put}, +}; + +use crate::net::{ + primitives::{DummyPrimitives, EPrimitives, Primitives}, + routing::{ + dispatcher::tables::{self, Tables}, + router::*, + RoutingContext, + }, }; -use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; -use zenoh_protocol::network::{ext, Declare, DeclareBody, DeclareKeyExpr}; -use zenoh_protocol::zenoh::{PushBody, Put}; #[test] fn base_test() { diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index ac466ae50b..54418d9f78 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -26,71 +26,59 @@ // Reexport API in flat namespace pub(crate) mod flat { - pub use crate::buffers::*; - pub use crate::bytes::*; - pub use crate::config::*; - pub use crate::core::{Error as ZError, Resolvable, Resolve, Result as ZResult}; - pub use crate::encoding::*; - pub use crate::handlers::*; - pub use crate::key_expr::*; - pub use crate::publication::*; - pub use crate::query::*; - pub use crate::queryable::*; - pub use crate::sample::*; - pub use crate::scouting::*; - pub use crate::selector::*; - pub use crate::session::*; #[cfg(feature = "shared-memory")] pub use crate::shm::*; - pub use crate::subscriber::*; - pub use crate::time::*; - pub use crate::value::*; + pub use crate::{ + buffers::*, + bytes::*, + config::*, + core::{Error as ZError, Resolvable, Resolve, Result as ZResult}, + encoding::*, + handlers::*, + key_expr::*, + publication::*, + query::*, + queryable::*, + sample::*, + scouting::*, + selector::*, + session::*, + subscriber::*, + time::*, + value::*, + }; } // Reexport API in hierarchical namespace pub(crate) mod mods { - pub use crate::buffers; - pub use crate::bytes; - pub use crate::config; - pub use crate::core; - pub use crate::encoding; - pub use crate::handlers; - pub use crate::key_expr; - pub use crate::publication; - pub use crate::query; - pub use crate::queryable; - 
pub use crate::sample; - pub use crate::scouting; - pub use crate::selector; - pub use crate::session; #[cfg(feature = "shared-memory")] pub use crate::shm; - pub use crate::subscriber; - pub use crate::time; - pub use crate::value; + pub use crate::{ + buffers, bytes, config, core, encoding, handlers, key_expr, publication, query, queryable, + sample, scouting, selector, session, subscriber, time, value, + }; } +pub use flat::*; +pub use mods::*; + #[allow(deprecated)] pub use crate::core::AsyncResolve; #[allow(deprecated)] pub use crate::core::SyncResolve; pub use crate::core::Wait; -pub use flat::*; -pub use mods::*; /// Prelude to import when using Zenoh's sync API. #[deprecated = "use `zenoh::prelude` instead"] pub mod sync { - pub use super::flat::*; - pub use super::mods::*; + pub use super::{flat::*, mods::*}; #[allow(deprecated)] pub use crate::core::SyncResolve; } /// Prelude to import when using Zenoh's async API. #[deprecated = "use `zenoh::prelude` instead"] pub mod r#async { - pub use super::flat::*; - pub use super::mods::*; + pub use super::{flat::*, mods::*}; #[allow(deprecated)] pub use crate::core::AsyncResolve; } diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index 5f3c482581..1889a9f9fa 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -13,8 +13,11 @@ // #![cfg(target_family = "unix")] mod test { - use std::sync::{Arc, Mutex}; - use std::time::Duration; + use std::{ + sync::{Arc, Mutex}, + time::Duration, + }; + use tokio::runtime::Handle; use zenoh::prelude::*; use zenoh_core::{zlock, ztimeout}; diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 836845a645..a63137ccfc 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -14,8 +14,7 @@ #[cfg(feature = "unstable")] #[test] fn attachment_pubsub() { - use zenoh::bytes::ZBytes; - use zenoh::prelude::*; + use zenoh::{bytes::ZBytes, prelude::*}; let zenoh = zenoh::open(Config::default()).wait().unwrap(); let _sub = zenoh .declare_subscriber("test/attachment") diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index 99ca6055da..41a681dc8f 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -12,8 +12,8 @@ // ZettaScale Zenoh Team, // use std::time::Duration; -use zenoh::internal::ztimeout; -use zenoh::prelude::*; + +use zenoh::{internal::ztimeout, prelude::*}; const TIMEOUT: Duration = Duration::from_secs(10); diff --git a/zenoh/tests/handler.rs b/zenoh/tests/handler.rs index 0862f9ee89..55f9368a87 100644 --- a/zenoh/tests/handler.rs +++ b/zenoh/tests/handler.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::{thread, time::Duration}; + use zenoh::prelude::*; #[test] diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index 37f193630d..4113aa462d 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -12,8 +12,8 @@ // ZettaScale Zenoh Team, // use std::sync::{Arc, Mutex}; -use zenoh::internal::zlock; -use zenoh::prelude::*; + +use zenoh::{internal::zlock, prelude::*}; #[cfg(target_os = "windows")] static MINIMAL_SLEEP_INTERVAL_MS: u64 = 17; diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index 0456361419..b974b5d705 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -14,7 +14,9 @@ #[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_liveliness() { - use {std::time::Duration, zenoh::internal::ztimeout, zenoh::prelude::*}; + use std::time::Duration; + + use zenoh::{internal::ztimeout, prelude::*}; const 
TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 6f44b2d0be..3a75cc9f37 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -12,8 +12,8 @@ // ZettaScale Zenoh Team, // use std::time::Duration; -use zenoh::internal::ztimeout; -use zenoh::prelude::*; + +use zenoh::{internal::ztimeout, prelude::*}; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 3c9f2723a6..fac785d7c0 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -11,14 +11,21 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::str::FromStr; -use std::sync::atomic::Ordering; -use std::sync::{atomic::AtomicUsize, Arc}; -use std::time::Duration; +use std::{ + str::FromStr, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; + use tokio_util::{sync::CancellationToken, task::TaskTracker}; -use zenoh::core::Result; -use zenoh::internal::{bail, ztimeout}; -use zenoh::prelude::*; +use zenoh::{ + core::Result, + internal::{bail, ztimeout}, + prelude::*, +}; const TIMEOUT: Duration = Duration::from_secs(10); const MSG_COUNT: usize = 50; diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 43dfc79470..4d0205f5be 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -11,13 +11,17 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Arc; -use std::time::Duration; -use zenoh::internal::ztimeout; -use zenoh::prelude::*; +use std::{ + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; + #[cfg(feature = "unstable")] use zenoh::runtime::{Runtime, RuntimeBuilder}; +use zenoh::{internal::ztimeout, prelude::*}; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs index ec77890c1e..14f6985414 100644 --- a/zenoh/tests/shm.rs +++ b/zenoh/tests/shm.rs @@ -13,11 +13,15 @@ // #[cfg(all(feature = "unstable", feature = "shared-memory"))] mod tests { - use std::sync::atomic::{AtomicUsize, Ordering}; - use std::sync::Arc; - use std::time::Duration; - use zenoh::internal::ztimeout; - use zenoh::prelude::*; + use std::{ + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, + }; + + use zenoh::{internal::ztimeout, prelude::*}; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index c5be555a00..b62a842b28 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -11,12 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Arc; -use std::time::Duration; +use std::{ + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; + use tokio::runtime::Handle; -use zenoh::internal::ztimeout; -use zenoh::prelude::*; +use zenoh::{internal::ztimeout, prelude::*}; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index cabee33333..229352e5db 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -14,16 +14,14 @@ use clap::Parser; use futures::future; use git_version::git_version; -use tracing_subscriber::layer::SubscriberExt; -use 
tracing_subscriber::util::SubscriberInitExt; -use tracing_subscriber::EnvFilter; -use zenoh::config::EndPoint; -use zenoh::config::{Config, ModeDependentValue, PermissionsConf, ValidatedMap}; -use zenoh::core::Result; -use zenoh::scouting::WhatAmI; - +use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter}; #[cfg(feature = "loki")] use url::Url; +use zenoh::{ + config::{Config, EndPoint, ModeDependentValue, PermissionsConf, ValidatedMap}, + core::Result, + scouting::WhatAmI, +}; #[cfg(feature = "loki")] const LOKI_ENDPOINT_VAR: &str = "LOKI_ENDPOINT"; From 5a841eef5dcfb8dc62d3ae9b96bafb6ab1661858 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Mon, 29 Apr 2024 17:29:02 +0200 Subject: [PATCH 317/357] chore: add pre-commit hook config --- .pre-commit-config.yaml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..86dc1703ed --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,21 @@ +# +# Copyright (c) 2024 ZettaScale Technology +# +# This program and the accompanying materials are made available under the +# terms of the Eclipse Public License 2.0 which is available at +# http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +# which is available at https://www.apache.org/licenses/LICENSE-2.0. +# +# SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +# +# Contributors: +# ZettaScale Zenoh Team, +# +repos: + - repo: local + hooks: + - id: fmt + name: fmt + entry: cargo fmt -- --config "unstable_features=true,imports_granularity=Crate,group_imports=StdExternalCrate" + language: system + types: [rust] From 42bee876f968a144016e71bfb9fbce02069e88b3 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Thu, 2 May 2024 10:44:59 +0200 Subject: [PATCH 318/357] fix: fix missing formatting --- zenoh/src/lib.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index bb2bccb869..58e17fc2ea 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -205,10 +205,9 @@ pub mod session { /// Tools to access information about the current zenoh [`Session`](crate::Session). 
pub mod info { - pub use crate::api::info::PeersZenohIdBuilder; - pub use crate::api::info::RoutersZenohIdBuilder; - pub use crate::api::info::SessionInfo; - pub use crate::api::info::ZenohIdBuilder; + pub use crate::api::info::{ + PeersZenohIdBuilder, RoutersZenohIdBuilder, SessionInfo, ZenohIdBuilder, + }; } /// Sample primitives From c4dfd101701527e9e1441f59f867aa49e529cc6e Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Thu, 2 May 2024 12:06:34 +0300 Subject: [PATCH 319/357] Polish SHM examples --- examples/Cargo.toml | 5 +++ examples/examples/z_alloc_shm.rs | 32 +++++------------ examples/examples/z_bytes_shm.rs | 17 ++++++--- examples/examples/z_get_shm.rs | 44 ++++++++--------------- examples/examples/z_ping_shm.rs | 37 +++++++------------ examples/examples/z_posix_shm_provider.rs | 44 +++++++++++++++++++++++ examples/examples/z_pub_shm.rs | 43 +++++++--------------- examples/examples/z_pub_shm_thr.rs | 37 +++++++------------ examples/examples/z_queryable_shm.rs | 44 ++++++++--------------- zenoh/src/api/bytes.rs | 2 +- 10 files changed, 140 insertions(+), 165 deletions(-) create mode 100644 examples/examples/z_posix_shm_provider.rs diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 263653028a..90281ae558 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -172,3 +172,8 @@ required-features = ["unstable", "shared-memory"] name = "z_bytes_shm" path = "examples/z_bytes_shm.rs" required-features = ["unstable", "shared-memory"] + +[[example]] +name = "z_posix_shm_provider" +path = "examples/z_posix_shm_provider.rs" +required-features = ["unstable", "shared-memory"] diff --git a/examples/examples/z_alloc_shm.rs b/examples/examples/z_alloc_shm.rs index acff39379c..a01de8d2fa 100644 --- a/examples/examples/z_alloc_shm.rs +++ b/examples/examples/z_alloc_shm.rs @@ -21,29 +21,15 @@ async fn main() { } async fn run() -> ZResult<()> { - // Construct an SHM backend - let backend = { - // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. - // The initialisation of SHM backend is completely backend-specific and user is free to do - // anything reasonable here. This code is execuated at the provider's first use - - // Alignment for POSIX SHM provider - // All allocations will be aligned corresponding to this alignment - - // that means that the provider will be able to satisfy allocation layouts - // with alignment <= provider_alignment - let provider_alignment = AllocAlignment::default(); - - // Create layout for POSIX Provider's memory - let provider_layout = MemoryLayout::new(65536, provider_alignment).unwrap(); - - PosixSharedMemoryProviderBackend::builder() - .with_layout(provider_layout) - .res() - .unwrap() - }; - - // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID - let shared_memory_provider = SharedMemoryProviderBuilder::builder() + // create an SHM backend... + // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(65536) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); diff --git a/examples/examples/z_bytes_shm.rs b/examples/examples/z_bytes_shm.rs index d9ab4e1f82..5c582e56e6 100644 --- a/examples/examples/z_bytes_shm.rs +++ b/examples/examples/z_bytes_shm.rs @@ -21,6 +21,7 @@ use zenoh::{ fn main() { // create an SHM backend... 
+ // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs let backend = PosixSharedMemoryProviderBackend::builder() .with_size(4096) .unwrap() @@ -32,11 +33,17 @@ fn main() { .backend(backend) .res(); - // Prepare a layout for allocations - let layout = provider.alloc_layout().size(1024).res().unwrap(); - - // allocate an SHM buffer (ZShmMut) - let mut owned_shm_buf_mut = layout.alloc().res().unwrap(); + // Allocate an SHM buffer + // NOTE: For allocation API please check z_alloc_shm.rs example + // NOTE: For buf's API please check z_bytes_shm.rs example + let mut owned_shm_buf_mut = provider + .alloc_layout() + .size(1024) + .res() + .unwrap() + .alloc() + .res() + .unwrap(); // mutable and immutable API let _data: &[u8] = &owned_shm_buf_mut; diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs index 2773348fd0..7466f6eabc 100644 --- a/examples/examples/z_get_shm.rs +++ b/examples/examples/z_get_shm.rs @@ -33,43 +33,29 @@ async fn main() { println!("Opening session..."); let session = zenoh::open(config).await.unwrap(); - println!("Creating POSIX SHM backend..."); - // Construct an SHM backend - let backend = { - // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. - // The initialisation of SHM backend is completely backend-specific and user is free to do - // anything reasonable here. This code is execuated at the provider's first use - - // Alignment for POSIX SHM provider - // All allocations will be aligned corresponding to this alignment - - // that means that the provider will be able to satisfy allocation layouts - // with alignment <= provider_alignment - let provider_alignment = AllocAlignment::default(); - - // Create layout for POSIX Provider's memory - let provider_layout = MemoryLayout::new(N * 1024, provider_alignment).unwrap(); - - PosixSharedMemoryProviderBackend::builder() - .with_layout(provider_layout) - .res() - .unwrap() - }; - - println!("Creating SHM Provider with POSIX backend..."); - // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID - let shared_memory_provider = SharedMemoryProviderBuilder::builder() + println!("Creating POSIX SHM provider..."); + // create an SHM backend... + // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(N * 1024) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); + // Allocate an SHM buffer + // NOTE: For allocation API please check z_alloc_shm.rs example + // NOTE: For buf's API please check z_bytes_shm.rs example println!("Allocating Shared Memory Buffer..."); - let layout = shared_memory_provider + let mut sbuf = provider .alloc_layout() .size(1024) .res() - .unwrap(); - - let mut sbuf = layout + .unwrap() .alloc() .with_policy::>() .res_async() diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs index 372967f6e8..f19c4274a4 100644 --- a/examples/examples/z_ping_shm.rs +++ b/examples/examples/z_ping_shm.rs @@ -44,34 +44,23 @@ fn main() { let mut samples = Vec::with_capacity(n); - // Construct an SHM backend - let backend = { - // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. - // The initialisation of SHM backend is completely backend-specific and user is free to do - // anything reasonable here. 
This code is execuated at the provider's first use - - // Alignment for POSIX SHM provider - // All allocations will be aligned corresponding to this alignment - - // that means that the provider will be able to satisfy allocation layouts - // with alignment <= provider_alignment - let provider_alignment = AllocAlignment::default(); - - // Create layout for POSIX Provider's memory - let provider_layout = MemoryLayout::new(size, provider_alignment).unwrap(); - - PosixSharedMemoryProviderBackend::builder() - .with_layout(provider_layout) - .res() - .unwrap() - }; - - // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID - let shared_memory_provider = SharedMemoryProviderBuilder::builder() + // create an SHM backend... + // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(size) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); - let buf = shared_memory_provider + // Allocate an SHM buffer + // NOTE: For allocation API please check z_alloc_shm.rs example + // NOTE: For buf's API please check z_bytes_shm.rs example + let mut buf = provider .alloc_layout() .size(size) .res() diff --git a/examples/examples/z_posix_shm_provider.rs b/examples/examples/z_posix_shm_provider.rs new file mode 100644 index 0000000000..cdf502bc61 --- /dev/null +++ b/examples/examples/z_posix_shm_provider.rs @@ -0,0 +1,44 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use zenoh::prelude::*; + +fn main() { + // Construct an SHM backend + let backend = { + // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. + + // Total amount of shared memory to allocate + let size = 4096; + + // An alignment for POSIX SHM provider + // Due to internal optimization, all allocations will be aligned corresponding to this alignment, + // so the provider will be able to satisfy allocation layouts with alignment <= provider_alignment + let provider_alignment = AllocAlignment::default(); + + // A layout for POSIX Provider's memory + let provider_layout = MemoryLayout::new(size, provider_alignment).unwrap(); + + // Build a provider backend + PosixSharedMemoryProviderBackend::builder() + .with_layout(provider_layout) + .res() + .unwrap() + }; + + // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID + let _shared_memory_provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); +} diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index 356737c3cd..d2a87a59cc 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -32,48 +32,31 @@ async fn main() -> Result<(), ZError> { println!("Opening session..."); let session = zenoh::open(config).await.unwrap(); - println!("Creating POSIX SHM backend..."); - // Construct an SHM backend - let backend = { - // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. 
- // The initialisation of SHM backend is completely backend-specific and user is free to do - // anything reasonable here. This code is execuated at the provider's first use - - // Alignment for POSIX SHM provider - // All allocations will be aligned corresponding to this alignment - - // that means that the provider will be able to satisfy allocation layouts - // with alignment <= provider_alignment - let provider_alignment = AllocAlignment::default(); - - // Create layout for POSIX Provider's memory - let provider_layout = MemoryLayout::new(N * 1024, provider_alignment).unwrap(); - - PosixSharedMemoryProviderBackend::builder() - .with_layout(provider_layout) - .res() - .unwrap() - }; - - println!("Creating SHM Provider with POSIX backend..."); - // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID - let shared_memory_provider = SharedMemoryProviderBuilder::builder() + println!("Creating POSIX SHM provider..."); + // create an SHM backend... + // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(N * 1024) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); let publisher = session.declare_publisher(&path).await.unwrap(); + // Create allocation layout for series of similar allocations println!("Allocating Shared Memory Buffer..."); - let layout = shared_memory_provider - .alloc_layout() - .size(1024) - .res() - .unwrap(); + let layout = provider.alloc_layout().size(1024).res().unwrap(); println!("Press CTRL-C to quit..."); for idx in 0..u32::MAX { tokio::time::sleep(std::time::Duration::from_secs(1)).await; + // Allocate particular SHM buffer using pre-created layout let mut sbuf = layout .alloc() .with_policy::>() diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index 0b94304321..0d44fbe6ee 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -28,34 +28,23 @@ async fn main() { let z = zenoh::open(config).await.unwrap(); - // Construct an SHM backend - let backend = { - // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. - // The initialisation of SHM backend is completely backend-specific and user is free to do - // anything reasonable here. This code is execuated at the provider's first use - - // Alignment for POSIX SHM provider - // All allocations will be aligned corresponding to this alignment - - // that means that the provider will be able to satisfy allocation layouts - // with alignment <= provider_alignment - let provider_alignment = AllocAlignment::default(); - - // Create layout for POSIX Provider's memory - let provider_layout = MemoryLayout::new(sm_size, provider_alignment).unwrap(); - - PosixSharedMemoryProviderBackend::builder() - .with_layout(provider_layout) - .res() - .unwrap() - }; - - // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID - let shared_memory_provider = SharedMemoryProviderBuilder::builder() + // create an SHM backend... 
+ // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(sm_size) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); - let mut buf = shared_memory_provider + // Allocate an SHM buffer + // NOTE: For allocation API please check z_alloc_shm.rs example + // NOTE: For buf's API please check z_bytes_shm.rs example + let mut buf = provider .alloc_layout() .size(size) .res() diff --git a/examples/examples/z_queryable_shm.rs b/examples/examples/z_queryable_shm.rs index 49939dcb0a..ed2320d2c5 100644 --- a/examples/examples/z_queryable_shm.rs +++ b/examples/examples/z_queryable_shm.rs @@ -32,31 +32,16 @@ async fn main() { println!("Opening session..."); let session = zenoh::open(config).await.unwrap(); - println!("Creating POSIX SHM backend..."); - // Construct an SHM backend - let backend = { - // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. - // The initialisation of SHM backend is completely backend-specific and user is free to do - // anything reasonable here. This code is execuated at the provider's first use - - // Alignment for POSIX SHM provider - // All allocations will be aligned corresponding to this alignment - - // that means that the provider will be able to satisfy allocation layouts - // with alignment <= provider_alignment - let provider_alignment = AllocAlignment::default(); - - // Create layout for POSIX Provider's memory - let provider_layout = MemoryLayout::new(N * 1024, provider_alignment).unwrap(); - - PosixSharedMemoryProviderBackend::builder() - .with_layout(provider_layout) - .res() - .unwrap() - }; - - println!("Creating SHM Provider with POSIX backend..."); - // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID - let shared_memory_provider = SharedMemoryProviderBuilder::builder() + println!("Creating POSIX SHM provider..."); + // create an SHM backend... 
+ // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(N * 1024) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); @@ -83,14 +68,15 @@ async fn main() { } println!(")"); + // Allocate an SHM buffer + // NOTE: For allocation API please check z_alloc_shm.rs example + // NOTE: For buf's API please check z_bytes_shm.rs example println!("Allocating Shared Memory Buffer..."); - let layout = shared_memory_provider + let mut sbuf = provider .alloc_layout() .size(1024) .res() - .unwrap(); - - let mut sbuf = layout + .unwrap() .alloc() .with_policy::>() .res_async() diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index 874f37ba8c..8a53d5ba34 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -1833,12 +1833,12 @@ mod tests { #[cfg(all(feature = "shared-memory", feature = "unstable"))] use zenoh_shm::api::{ + buffer::zshm::{zshm, ZShm}, protocol_implementations::posix::{ posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, protocol_id::POSIX_PROTOCOL_ID, }, provider::shared_memory_provider::SharedMemoryProviderBuilder, - buffer::zshm::{zshm, ZShm}, }; const NUM: usize = 1_000; From 446fa2fc28770f92cb44d456dc9638faf4263761 Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Thu, 2 May 2024 12:15:20 +0300 Subject: [PATCH 320/357] fix lints --- zenoh/tests/bytes.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zenoh/tests/bytes.rs b/zenoh/tests/bytes.rs index f8eb11bf63..6de12ab63f 100644 --- a/zenoh/tests/bytes.rs +++ b/zenoh/tests/bytes.rs @@ -15,7 +15,7 @@ #[test] #[cfg(all(feature = "shared-memory", feature = "unstable"))] fn shm_bytes_single_buf() { - use zenoh::prelude::r#async::*; + use zenoh::prelude::*; // create an SHM backend... 
let backend = PosixSharedMemoryProviderBackend::builder() From 74b444efa1a636ed7f2c7099a53914a6f7da98ee Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Thu, 2 May 2024 12:59:35 +0300 Subject: [PATCH 321/357] fix lint --- examples/examples/z_ping_shm.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs index f19c4274a4..354f11d789 100644 --- a/examples/examples/z_ping_shm.rs +++ b/examples/examples/z_ping_shm.rs @@ -60,7 +60,7 @@ fn main() { // Allocate an SHM buffer // NOTE: For allocation API please check z_alloc_shm.rs example // NOTE: For buf's API please check z_bytes_shm.rs example - let mut buf = provider + let buf = provider .alloc_layout() .size(size) .res() From e263d80a076b6f7c4903e7b8952358c7248a5519 Mon Sep 17 00:00:00 2001 From: Yuyuan Yuan Date: Thu, 2 May 2024 20:19:31 +0800 Subject: [PATCH 322/357] refactor: downsampling test (#999) * test: run downsampling_by_keyexpr test in parallel * test: refine the downsampling test * fix: loosen the condition * test: stability [1/3] * test: stability [2/3] * test: stability [3/3] --- .config/nextest.toml | 3 +- Cargo.lock | 1 - zenoh/tests/interceptors.rs | 408 ++++++++++++++++-------------------- 3 files changed, 182 insertions(+), 230 deletions(-) diff --git a/.config/nextest.toml b/.config/nextest.toml index 01b3ae9147..37b7dfcea0 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -10,8 +10,7 @@ test(=zenoh_session_unicast) | test(=zenoh_session_multicast) | test(=transport_tcp_intermittent) | test(=transport_tcp_intermittent_for_lowlatency_transport) | -test(=three_node_combination) | -test(=downsampling_by_keyexpr) +test(=three_node_combination) """ threads-required = 'num-cpus' slow-timeout = { period = "60s", terminate-after = 6 } diff --git a/Cargo.lock b/Cargo.lock index 5acc4eb0d4..36078d0238 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5454,7 +5454,6 @@ dependencies = [ "ron", "serde", "tokio", - "tracing", "zenoh-collections", "zenoh-macros", "zenoh-result", diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index b8f9164cfd..dbbdd7ea0d 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -11,273 +11,227 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::sync::{Arc, Mutex}; -use zenoh_core::zlock; - +use std::collections::HashMap; +use std::sync::{ + atomic::{AtomicBool, AtomicUsize, Ordering}, + Arc, +}; +use zenoh::prelude::sync::*; +use zenoh::prelude::Config; +use zenoh_config::{DownsamplingItemConf, DownsamplingRuleConf, InterceptorFlow}; + +// Tokio's time granularity on different platforms #[cfg(target_os = "windows")] static MINIMAL_SLEEP_INTERVAL_MS: u64 = 17; #[cfg(not(target_os = "windows"))] static MINIMAL_SLEEP_INTERVAL_MS: u64 = 2; -struct IntervalCounter { - first_tick: bool, - last_time: std::time::Instant, - count: u32, - total_time: std::time::Duration, -} +static REPEAT: usize = 3; +static WARMUP_MS: u64 = 500; -impl IntervalCounter { - fn new() -> IntervalCounter { - IntervalCounter { - first_tick: true, - last_time: std::time::Instant::now(), - count: 0, - total_time: std::time::Duration::from_secs(0), - } - } +fn build_config( + locator: &str, + ds_config: Vec, + flow: InterceptorFlow, +) -> (Config, Config) { + let mut pub_config = Config::default(); + pub_config + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); - fn tick(&mut self) { - let curr_time = std::time::Instant::now(); - if self.first_tick { - self.first_tick = false; - } else 
{ - self.total_time += curr_time - self.last_time; - self.count += 1; - } - self.last_time = curr_time; - } + let mut sub_config = Config::default(); + sub_config + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); - fn get_middle(&self) -> u32 { - assert!(self.count > 0); - self.total_time.as_millis() as u32 / self.count - } + sub_config.listen.endpoints = vec![locator.parse().unwrap()]; + pub_config.connect.endpoints = vec![locator.parse().unwrap()]; - fn get_count(&self) -> u32 { - self.count - } + match flow { + InterceptorFlow::Egress => pub_config.set_downsampling(ds_config).unwrap(), + InterceptorFlow::Ingress => sub_config.set_downsampling(ds_config).unwrap(), + }; - fn check_middle(&self, ms: u32) { - let middle = self.get_middle(); - println!("Interval {}, count: {}, middle: {}", ms, self.count, middle); - assert!(middle + 1 >= ms); - } + (pub_config, sub_config) } -fn downsampling_by_keyexpr_impl(egress: bool) { - zenoh_util::try_init_log_from_env(); - - use zenoh::prelude::sync::*; - - let ds_cfg = format!( - r#" - [ - {{ - flow: "{}", - rules: [ - {{ key_expr: "test/downsamples_by_keyexp/r100", freq: 10, }}, - {{ key_expr: "test/downsamples_by_keyexp/r50", freq: 20, }} - ], - }}, - ] "#, - (if egress { "egress" } else { "ingress" }) +fn downsampling_test( + pub_config: Config, + sub_config: Config, + ke_prefix: &str, + ke_of_rates: Vec>, + rate_check: F, +) where + F: Fn(KeyExpr<'_>, usize) -> bool + Send + 'static, +{ + type Counters<'a> = Arc, AtomicUsize>>; + let counters: Counters = Arc::new( + ke_of_rates + .clone() + .into_iter() + .map(|ke| (ke, AtomicUsize::new(0))) + .collect(), ); - // declare subscriber - let mut config_sub = Config::default(); - if !egress { - config_sub.insert_json5("downsampling", &ds_cfg).unwrap(); - } - config_sub - .insert_json5("listen/endpoints", r#"["tcp/127.0.0.1:38446"]"#) - .unwrap(); - config_sub - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - let zenoh_sub = zenoh::open(config_sub).res().unwrap(); - - let counter_r100 = Arc::new(Mutex::new(IntervalCounter::new())); - let counter_r100_clone = counter_r100.clone(); - let counter_r50 = Arc::new(Mutex::new(IntervalCounter::new())); - let counter_r50_clone = counter_r50.clone(); - - let total_count = Arc::new(Mutex::new(0)); - let total_count_clone = total_count.clone(); - - let _sub = zenoh_sub - .declare_subscriber("test/downsamples_by_keyexp/*") - .callback(move |sample| { - let mut count = zlock!(total_count_clone); - *count += 1; - if sample.key_expr.as_str() == "test/downsamples_by_keyexp/r100" { - zlock!(counter_r100).tick(); - } else if sample.key_expr.as_str() == "test/downsamples_by_keyexp/r50" { - zlock!(counter_r50).tick(); + let sub_session = zenoh::open(sub_config).res().unwrap(); + let _sub = sub_session + .declare_subscriber(format!("{ke_prefix}/*")) + .callback({ + let counters = counters.clone(); + move |sample| { + counters + .get(&sample.key_expr) + .map(|ctr| ctr.fetch_add(1, Ordering::SeqCst)); } }) .res() .unwrap(); - // declare publisher - let mut config_pub = Config::default(); - if egress { - config_pub.insert_json5("downsampling", &ds_cfg).unwrap(); + let is_terminated = Arc::new(AtomicBool::new(false)); + let c_is_terminated = is_terminated.clone(); + let handle = std::thread::spawn(move || { + let pub_session = zenoh::open(pub_config).res().unwrap(); + let publishers: Vec<_> = ke_of_rates + .into_iter() + .map(|ke| pub_session.declare_publisher(ke).res().unwrap()) + .collect(); + let interval = 
std::time::Duration::from_millis(MINIMAL_SLEEP_INTERVAL_MS); + while !c_is_terminated.load(Ordering::SeqCst) { + publishers.iter().for_each(|publ| { + publ.put("message").res().unwrap(); + }); + std::thread::sleep(interval); + } + }); + + std::thread::sleep(std::time::Duration::from_millis(WARMUP_MS)); + counters.iter().for_each(|(_, ctr)| { + ctr.swap(0, Ordering::SeqCst); + }); + + for _ in 0..REPEAT { + std::thread::sleep(std::time::Duration::from_secs(1)); + counters.iter().for_each(|(ke, ctr)| { + let rate = ctr.swap(0, Ordering::SeqCst); + if !rate_check(ke.into(), rate) { + panic!("The test failed on the {ke:?} at the rate of {rate:?}"); + } + }); } - config_pub - .insert_json5("connect/endpoints", r#"["tcp/127.0.0.1:38446"]"#) - .unwrap(); - config_pub - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - let zenoh_pub = zenoh::open(config_pub).res().unwrap(); - let publisher_r100 = zenoh_pub - .declare_publisher("test/downsamples_by_keyexp/r100") - .res() - .unwrap(); - - let publisher_r50 = zenoh_pub - .declare_publisher("test/downsamples_by_keyexp/r50") - .res() - .unwrap(); - let publisher_all = zenoh_pub - .declare_publisher("test/downsamples_by_keyexp/all") - .res() - .unwrap(); - - // WARN(yuyuan): 2 ms is the limit of tokio - let interval = std::time::Duration::from_millis(MINIMAL_SLEEP_INTERVAL_MS); - let messages_count = 1000; - for i in 0..messages_count { - publisher_r100.put(format!("message {}", i)).res().unwrap(); - publisher_r50.put(format!("message {}", i)).res().unwrap(); - publisher_all.put(format!("message {}", i)).res().unwrap(); - std::thread::sleep(interval); + let _ = is_terminated.swap(true, Ordering::SeqCst); + if let Err(err) = handle.join() { + panic!("Failed to join the handle due to {err:?}"); } +} + +fn downsampling_by_keyexpr_impl(flow: InterceptorFlow) { + let ke_prefix = "test/downsamples_by_keyexp"; + let locator = "tcp/127.0.0.1:38446"; + + let ke_10hz: KeyExpr = format!("{ke_prefix}/10hz").try_into().unwrap(); + let ke_20hz: KeyExpr = format!("{ke_prefix}/20hz").try_into().unwrap(); + + let ds_config = DownsamplingItemConf { + flow, + interfaces: None, + rules: vec![ + DownsamplingRuleConf { + key_expr: ke_10hz.clone().into(), + freq: 10.0, + }, + DownsamplingRuleConf { + key_expr: ke_20hz.clone().into(), + freq: 20.0, + }, + ], + }; - for _ in 0..100 { - if *zlock!(total_count) >= messages_count - && zlock!(counter_r50_clone).get_count() > 0 - && zlock!(counter_r100_clone).get_count() > 0 - { - break; + let ke_of_rates: Vec> = ds_config + .rules + .iter() + .map(|x| x.key_expr.clone().into()) + .collect(); + + let rate_check = move |ke: KeyExpr, rate: usize| -> bool { + tracing::info!("keyexpr: {ke}, rate: {rate}"); + if ke == ke_10hz { + rate > 0 && rate <= 10 + 1 + } else if ke == ke_20hz { + rate > 0 && rate <= 20 + 1 + } else { + tracing::error!("Shouldn't reach this case. 
Invalid keyexpr {ke} detected."); + false } - std::thread::sleep(std::time::Duration::from_millis(100)); - } - assert!(*zlock!(total_count) >= messages_count); + }; + + let (pub_config, sub_config) = build_config(locator, vec![ds_config], flow); - zlock!(counter_r50_clone).check_middle(50); - zlock!(counter_r100_clone).check_middle(100); + downsampling_test(pub_config, sub_config, ke_prefix, ke_of_rates, rate_check); } #[test] fn downsampling_by_keyexpr() { - downsampling_by_keyexpr_impl(true); - downsampling_by_keyexpr_impl(false); + zenoh_util::try_init_log_from_env(); + downsampling_by_keyexpr_impl(InterceptorFlow::Ingress); + downsampling_by_keyexpr_impl(InterceptorFlow::Egress); } #[cfg(unix)] -fn downsampling_by_interface_impl(egress: bool) { - zenoh_util::try_init_log_from_env(); - - use zenoh::prelude::sync::*; - - let ds_cfg = format!( - r#" - [ - {{ - interfaces: ["lo", "lo0"], - flow: "{0}", - rules: [ - {{ key_expr: "test/downsamples_by_interface/r100", freq: 10, }}, - ], - }}, - {{ - interfaces: ["some_unknown_interface"], - flow: "{0}", - rules: [ - {{ key_expr: "test/downsamples_by_interface/all", freq: 10, }}, - ], - }}, - ] "#, - (if egress { "egress" } else { "ingress" }) - ); - // declare subscriber - let mut config_sub = Config::default(); - config_sub - .insert_json5("listen/endpoints", r#"["tcp/127.0.0.1:38447"]"#) - .unwrap(); - if !egress { - config_sub.insert_json5("downsampling", &ds_cfg).unwrap(); +fn downsampling_by_interface_impl(flow: InterceptorFlow) { + let ke_prefix = "test/downsamples_by_interface"; + let locator = "tcp/127.0.0.1:38447"; + + let ke_10hz: KeyExpr = format!("{ke_prefix}/10hz").try_into().unwrap(); + let ke_no_effect: KeyExpr = format!("{ke_prefix}/no_effect").try_into().unwrap(); + let ke_of_rates: Vec> = vec![ke_10hz.clone(), ke_no_effect.clone()]; + + let ds_config = vec![ + DownsamplingItemConf { + flow, + interfaces: Some(vec!["lo".to_string(), "lo0".to_string()]), + rules: vec![DownsamplingRuleConf { + key_expr: ke_10hz.clone().into(), + freq: 10.0, + }], + }, + DownsamplingItemConf { + flow, + interfaces: Some(vec!["some_unknown_interface".to_string()]), + rules: vec![DownsamplingRuleConf { + key_expr: ke_no_effect.clone().into(), + freq: 10.0, + }], + }, + ]; + + let rate_check = move |ke: KeyExpr, rate: usize| -> bool { + tracing::info!("keyexpr: {ke}, rate: {rate}"); + if ke == ke_10hz { + rate > 0 && rate <= 10 + 1 + } else if ke == ke_no_effect { + rate > 10 + } else { + tracing::error!("Shouldn't reach this case. 
Invalid keyexpr {ke} detected."); + false + } }; - let zenoh_sub = zenoh::open(config_sub).res().unwrap(); - - let counter_r100 = Arc::new(Mutex::new(IntervalCounter::new())); - let counter_r100_clone = counter_r100.clone(); - let total_count = Arc::new(Mutex::new(0)); - let total_count_clone = total_count.clone(); + let (pub_config, sub_config) = build_config(locator, ds_config, flow); - let _sub = zenoh_sub - .declare_subscriber("test/downsamples_by_interface/*") - .callback(move |sample| { - let mut count = zlock!(total_count_clone); - *count += 1; - if sample.key_expr.as_str() == "test/downsamples_by_interface/r100" { - zlock!(counter_r100).tick(); - } - }) - .res() - .unwrap(); - - // declare publisher - let mut config_pub = Config::default(); - config_pub - .insert_json5("connect/endpoints", r#"["tcp/127.0.0.1:38447"]"#) - .unwrap(); - if egress { - config_pub.insert_json5("downsampling", &ds_cfg).unwrap(); - } - let zenoh_pub = zenoh::open(config_pub).res().unwrap(); - let publisher_r100 = zenoh_pub - .declare_publisher("test/downsamples_by_interface/r100") - .res() - .unwrap(); - - let publisher_all = zenoh_pub - .declare_publisher("test/downsamples_by_interface/all") - .res() - .unwrap(); - - // WARN(yuyuan): 2 ms is the limit of tokio - let interval = std::time::Duration::from_millis(MINIMAL_SLEEP_INTERVAL_MS); - let messages_count = 1000; - for i in 0..messages_count { - publisher_r100.put(format!("message {}", i)).res().unwrap(); - publisher_all.put(format!("message {}", i)).res().unwrap(); - - std::thread::sleep(interval); - } - - for _ in 0..100 { - if *zlock!(total_count) >= messages_count && zlock!(counter_r100_clone).get_count() > 0 { - break; - } - std::thread::sleep(std::time::Duration::from_millis(100)); - } - assert!(*zlock!(total_count) >= messages_count); - - zlock!(counter_r100_clone).check_middle(100); + downsampling_test(pub_config, sub_config, ke_prefix, ke_of_rates, rate_check); } #[cfg(unix)] #[test] fn downsampling_by_interface() { - downsampling_by_interface_impl(true); - downsampling_by_interface_impl(false); + zenoh_util::try_init_log_from_env(); + downsampling_by_interface_impl(InterceptorFlow::Ingress); + downsampling_by_interface_impl(InterceptorFlow::Egress); } #[test] From c6be88277285dc727a2e0a8202eb23bfdd0ef33e Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz Date: Thu, 2 May 2024 14:26:41 +0200 Subject: [PATCH 323/357] feat: Check licenses of the dependency graph (#1001) --- .github/workflows/ci.yml | 11 ++++++----- deny.toml | 24 ++++++++++++++++++++++++ 2 files changed, 30 insertions(+), 5 deletions(-) create mode 100644 deny.toml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 33432f827a..a8552d58dd 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -38,14 +38,12 @@ jobs: - name: Clone this repository uses: actions/checkout@v4 - - name: Install Rust toolchain - run: | - rustup show - rustup component add rustfmt clippy - - name: Setup rust-cache uses: Swatinem/rust-cache@v2 + - name: Install dependencies + run: cargo +stable install cargo-deny --locked + - name: Code format check run: cargo fmt --check @@ -69,6 +67,9 @@ jobs: - name: Run doctests run: cargo test --doc + - name: Check licenses + run: cargo deny check licenses + test: name: Unit tests on ${{ matrix.os }} runs-on: ${{ matrix.os }} diff --git a/deny.toml b/deny.toml new file mode 100644 index 0000000000..1a4a14f763 --- /dev/null +++ b/deny.toml @@ -0,0 +1,24 @@ +# NOTE: Before allowing a new license, make sure it is approved by the Eclipse 
Foundation. +# A list of approved third party licenses is available at: https://www.eclipse.org/legal/licenses.php. +[licenses] +allow = [ + "MIT", + "Apache-2.0", + "EPL-2.0", + "ISC", + "Unicode-DFS-2016", + "Zlib", + "BSD-2-Clause", + "BSD-3-Clause", + "CC0-1.0", + "MPL-2.0", + "OpenSSL", +] + +# This was copied from https://github.com/EmbarkStudios/cargo-deny/blob/main/deny.toml#L64 +[[licenses.clarify]] +crate = "ring" +expression = "ISC AND MIT AND OpenSSL" +license-files = [{ path = "LICENSE", hash = 0xbd0eed23 }] +[licenses.private] +ignore = true From 609798ff1a37b90c7b76d1d933cbccc73a911474 Mon Sep 17 00:00:00 2001 From: Gabriele Baldoni Date: Thu, 2 May 2024 12:31:42 +0000 Subject: [PATCH 324/357] fix(compression): adding a workaround to send uncompressed OpenAck (#1005) Signed-off-by: gabrik --- io/zenoh-transport/src/unicast/link.rs | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/io/zenoh-transport/src/unicast/link.rs b/io/zenoh-transport/src/unicast/link.rs index 54543b8e6e..aada35c6c8 100644 --- a/io/zenoh-transport/src/unicast/link.rs +++ b/io/zenoh-transport/src/unicast/link.rs @@ -286,7 +286,21 @@ impl MaybeOpenAck { pub(crate) async fn send_open_ack(mut self) -> ZResult<()> { if let Some(msg) = self.open_ack { - return self.link.send(&msg.into()).await.map(|_| {}); + zcondfeat!( + "transport_compression", + { + // !!! Workaround !!! as the state of the link is set with compression once the OpenSyn is received. + // Here we are disabling the compression just to send the OpenAck (that is not supposed to be compressed). + // Then then we re-enable it, in case it was enabled, after the OpenAck has been sent. + let compression = self.link.inner.config.batch.is_compression; + self.link.inner.config.batch.is_compression = false; + self.link.send(&msg.into()).await?; + self.link.inner.config.batch.is_compression = compression; + }, + { + self.link.send(&msg.into()).await?; + } + ) } Ok(()) } From b27a289931dfb317ee5e6696d23cd16251d9b61e Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 2 May 2024 16:39:57 +0200 Subject: [PATCH 325/357] Pre-commit fmt --- ci/nostd-check/src/bin/nostd_check.rs | 1 + ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs | 4 ++-- .../src/queryable_get/bin/z_queryable_get.rs | 7 +++---- commons/zenoh-shm/src/header/storage.rs | 2 +- commons/zenoh-shm/src/header/subscription.rs | 2 +- commons/zenoh-shm/src/posix_shm/segment.rs | 2 +- commons/zenoh-shm/src/watchdog/confirmator.rs | 4 ++-- commons/zenoh-shm/src/watchdog/periodic_task.rs | 9 +++++---- commons/zenoh-shm/src/watchdog/storage.rs | 2 +- .../src/unicast/universal/reliability.rs | 11 +++++------ 10 files changed, 22 insertions(+), 22 deletions(-) diff --git a/ci/nostd-check/src/bin/nostd_check.rs b/ci/nostd-check/src/bin/nostd_check.rs index 74f85ae06c..b243c9d182 100644 --- a/ci/nostd-check/src/bin/nostd_check.rs +++ b/ci/nostd-check/src/bin/nostd_check.rs @@ -15,6 +15,7 @@ #![no_std] use core::panic::PanicInfo; + use getrandom::{register_custom_getrandom, Error}; use linked_list_allocator::LockedHeap; #[allow(unused_imports)] diff --git a/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs b/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs index 2091f833a1..7b1b017c7d 100644 --- a/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs +++ b/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs @@ -12,8 +12,8 @@ // ZettaScale Zenoh Team, // use std::time::Duration; -use zenoh::config::Config; -use zenoh::prelude::*; + +use zenoh::{config::Config, prelude::*}; #[tokio::main] async 
fn main() { diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs index 43cb038f94..69335d674e 100644 --- a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -11,10 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::convert::TryFrom; -use std::time::Duration; -use zenoh::config::Config; -use zenoh::prelude::*; +use std::{convert::TryFrom, time::Duration}; + +use zenoh::{config::Config, prelude::*}; #[tokio::main] async fn main() { diff --git a/commons/zenoh-shm/src/header/storage.rs b/commons/zenoh-shm/src/header/storage.rs index c09fa83dba..36e004511a 100644 --- a/commons/zenoh-shm/src/header/storage.rs +++ b/commons/zenoh-shm/src/header/storage.rs @@ -11,12 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use lazy_static::lazy_static; use std::{ collections::LinkedList, sync::{Arc, Mutex}, }; +use lazy_static::lazy_static; use zenoh_result::{zerror, ZResult}; use super::{ diff --git a/commons/zenoh-shm/src/header/subscription.rs b/commons/zenoh-shm/src/header/subscription.rs index 49ad170aea..5efe54a7f7 100644 --- a/commons/zenoh-shm/src/header/subscription.rs +++ b/commons/zenoh-shm/src/header/subscription.rs @@ -11,12 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use lazy_static::lazy_static; use std::{ collections::BTreeMap, sync::{Arc, Mutex}, }; +use lazy_static::lazy_static; use zenoh_result::{zerror, ZResult}; use super::{ diff --git a/commons/zenoh-shm/src/posix_shm/segment.rs b/commons/zenoh-shm/src/posix_shm/segment.rs index ab79d0fcc1..5458ab3e3e 100644 --- a/commons/zenoh-shm/src/posix_shm/segment.rs +++ b/commons/zenoh-shm/src/posix_shm/segment.rs @@ -117,7 +117,7 @@ where unsafe { *(self.shmem.as_ptr() as *mut usize) } } - // TODO: dead code warning occurs because of `tested_crate_module!()` macro when feature `test` is not enabled. Better to fix that + // TODO: dead code warning occurs because of `tested_crate_module!()` macro when feature `test` is not enabled. 
Better to fix that #[allow(dead_code)] pub fn is_empty(&self) -> bool { unsafe { *(self.shmem.as_ptr() as *mut usize) == 0 } diff --git a/commons/zenoh-shm/src/watchdog/confirmator.rs b/commons/zenoh-shm/src/watchdog/confirmator.rs index 54c2d233dc..b84a76dc50 100644 --- a/commons/zenoh-shm/src/watchdog/confirmator.rs +++ b/commons/zenoh-shm/src/watchdog/confirmator.rs @@ -22,8 +22,8 @@ use lazy_static::lazy_static; use zenoh_result::{zerror, ZResult}; use super::{ - periodic_task::PeriodicTask, descriptor::{Descriptor, OwnedDescriptor, SegmentID}, + periodic_task::PeriodicTask, segment::Segment, }; @@ -117,7 +117,7 @@ pub struct WatchdogConfirmator { impl WatchdogConfirmator { fn new(interval: Duration) -> Self { let segment_transactions = Arc::>>::default(); - + let c_segment_transactions = segment_transactions.clone(); let mut segments: Vec<(Arc, BTreeMap)> = vec![]; let task = PeriodicTask::new("Watchdog Confirmator".to_owned(), interval, move || { diff --git a/commons/zenoh-shm/src/watchdog/periodic_task.rs b/commons/zenoh-shm/src/watchdog/periodic_task.rs index 98cf8fbba7..08a6ee18d3 100644 --- a/commons/zenoh-shm/src/watchdog/periodic_task.rs +++ b/commons/zenoh-shm/src/watchdog/periodic_task.rs @@ -23,7 +23,8 @@ use std::{ use thread_priority::ThreadBuilder; #[cfg(unix)] use thread_priority::{ - set_current_thread_priority, RealtimeThreadSchedulePolicy, ThreadPriority, ThreadPriorityValue, ThreadSchedulePolicy::Realtime + set_current_thread_priority, RealtimeThreadSchedulePolicy, ThreadPriority, ThreadPriorityValue, + ThreadSchedulePolicy::Realtime, }; pub struct PeriodicTask { @@ -44,7 +45,7 @@ impl PeriodicTask { let running = Arc::new(AtomicBool::new(true)); let c_running = running.clone(); - + #[cfg(unix)] let builder = ThreadBuilder::default() .name(name) @@ -54,7 +55,7 @@ impl PeriodicTask { // TODO: deal with windows realtime scheduling #[cfg(windows)] let builder = ThreadBuilder::default().name(name); - + let _ = builder.spawn(move |result| { if let Err(e) = result { #[cfg(windows)] @@ -79,7 +80,7 @@ impl PeriodicTask { let cycle_start = std::time::Instant::now(); f(); - + // sleep for next iteration let elapsed = cycle_start.elapsed(); if elapsed < interval { diff --git a/commons/zenoh-shm/src/watchdog/storage.rs b/commons/zenoh-shm/src/watchdog/storage.rs index 5744a273a0..1b04ad313c 100644 --- a/commons/zenoh-shm/src/watchdog/storage.rs +++ b/commons/zenoh-shm/src/watchdog/storage.rs @@ -11,12 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use lazy_static::lazy_static; use std::{ collections::BTreeSet, sync::{Arc, Mutex}, }; +use lazy_static::lazy_static; use zenoh_result::{zerror, ZResult}; use super::{allocated_watchdog::AllocatedWatchdog, descriptor::OwnedDescriptor, segment::Segment}; diff --git a/io/zenoh-transport/src/unicast/universal/reliability.rs b/io/zenoh-transport/src/unicast/universal/reliability.rs index b3637bee27..7aece8d077 100644 --- a/io/zenoh-transport/src/unicast/universal/reliability.rs +++ b/io/zenoh-transport/src/unicast/universal/reliability.rs @@ -11,15 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::convert::TryInto; -use std::fmt; - -use super::common::seq_num::SeqNum; -use super::core::u64; +use std::{convert::TryInto, fmt}; use zenoh_result::{ZError, ZErrorKind, ZResult}; use zenoh_util::zerror; +use super::{common::seq_num::SeqNum, core::u64}; + pub(super) struct ReliabilityQueue { sn: SeqNum, index: usize, @@ -249,9 +247,10 @@ impl fmt::Debug for ReliabilityQueue { #[cfg(test)] mod tests { - use super::*; use 
rand::{thread_rng, Rng}; + use super::*; + #[test] fn reliability_queue_simple() { let size = 2; From 1c5381814b2d69c99d667d3a441dcb86e0a85aa1 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Thu, 2 May 2024 18:18:09 +0200 Subject: [PATCH 326/357] Fix compression test (#1006) --- io/zenoh-transport/src/unicast/link.rs | 4 +++- io/zenoh-transport/tests/unicast_compression.rs | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/io/zenoh-transport/src/unicast/link.rs b/io/zenoh-transport/src/unicast/link.rs index aada35c6c8..9a8f7f3dbc 100644 --- a/io/zenoh-transport/src/unicast/link.rs +++ b/io/zenoh-transport/src/unicast/link.rs @@ -66,7 +66,9 @@ impl TransportLinkUnicast { .batch .is_compression .then_some(BBuf::with_capacity( - lz4_flex::block::get_maximum_output_size(self.config.batch.mtu as usize), + lz4_flex::block::get_maximum_output_size( + self.config.batch.max_buffer_size() + ), )), None ), diff --git a/io/zenoh-transport/tests/unicast_compression.rs b/io/zenoh-transport/tests/unicast_compression.rs index ab6a9ff414..91784a3497 100644 --- a/io/zenoh-transport/tests/unicast_compression.rs +++ b/io/zenoh-transport/tests/unicast_compression.rs @@ -178,7 +178,8 @@ mod tests { #[cfg(feature = "shared-memory")] false, lowlatency_transport, - ); + ) + .compression(true); let router_manager = TransportManager::builder() .zid(router_id) .whatami(WhatAmI::Router) From 7a4744576e21c85d31b1527b2c4bf2230e5152e8 Mon Sep 17 00:00:00 2001 From: Yuyuan Yuan Date: Fri, 3 May 2024 01:20:00 +0800 Subject: [PATCH 327/357] docs: format and add a link to ZRuntime config (#1000) * docs: format and add a link to ZRuntime config * docs: format and remove the tx thread option in DEFAULT_CONFIG --- DEFAULT_CONFIG.json5 | 45 +++++++++++++++++++++----------------------- README.md | 38 ++++++++++++++++++++----------------- 2 files changed, 42 insertions(+), 41 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index ec9a827777..430268935a 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -19,8 +19,8 @@ /// Which endpoints to connect to. E.g. tcp/localhost:7447. /// By configuring the endpoints, it is possible to tell zenoh which router/peer to connect to at startup. - /// For TCP/UDP on Linux, it is possible additionally specify the interface to be connected to: - /// E.g. tcp/192.168.0.1:7447#iface=eth0, for connect only if the IP address is reachable via the interface eth0 + /// For TCP/UDP on Linux, it is possible additionally specify the interface to be connected to: + /// E.g. tcp/192.168.0.1:7447#iface=eth0, for connect only if the IP address is reachable via the interface eth0 connect: { /// timeout waiting for all endpoints connected (0: no retry, -1: infinite timeout) /// Accepts a single value or different values for router, peer and client. @@ -30,16 +30,16 @@ // "/
" ], - /// Global connect configuration, + /// Global connect configuration, /// Accepts a single value or different values for router, peer and client. /// The configuration can also be specified for the separate endpoint /// it will override the global one /// E.g. tcp/192.168.0.1:7447#retry_period_init_ms=20000;retry_period_max_ms=10000" - + /// exit from application, if timeout exceed exit_on_failure: { router: false, peer: false, client: true }, - /// connect establishing retry configuration - retry: { + /// connect establishing retry configuration + retry: { /// intial wait timeout until next connect try period_init_ms: 1000, /// maximum wait timeout until next connect try @@ -52,7 +52,7 @@ /// Which endpoints to listen on. E.g. tcp/localhost:7447. /// By configuring the endpoints, it is possible to tell zenoh which are the endpoints that other routers, /// peers, or client can use to establish a zenoh session. - /// For TCP/UDP on Linux, it is possible additionally specify the interface to be listened to: + /// For TCP/UDP on Linux, it is possible additionally specify the interface to be listened to: /// E.g. tcp/0.0.0.0:7447#iface=eth0, for listen connection only on eth0 listen: { /// timeout waiting for all listen endpoints (0: no retry, -1: infinite timeout) @@ -63,16 +63,16 @@ // "/
" ], - /// Global listen configuration, + /// Global listen configuration, /// Accepts a single value or different values for router, peer and client. /// The configuration can also be specified for the separate endpoint /// it will override the global one /// E.g. tcp/192.168.0.1:7447#exit_on_failure=false;retry_period_max_ms=1000" - + /// exit from application, if timeout exceed exit_on_failure: true, - /// listen retry configuration - retry: { + /// listen retry configuration + retry: { /// intial wait timeout until next try period_init_ms: 1000, /// maximum wait timeout until next try @@ -171,19 +171,19 @@ // flow: "egress", // /// A list of downsampling rules: key_expression and the maximum frequency in Hertz // rules: [ - // { key_expr: "demo/example/zenoh-rs-pub", freq: 0.1 }, + // { key_expr: "demo/example/zenoh-rs-pub", freq: 0.1 }, // ], // }, // ], - // /// configure access control (ACL) rules + // /// configure access control (ACL) rules // access_control: { // ///[true/false] acl will be activated only if this is set to true // "enabled": false, // ///[deny/allow] default permission is deny (even if this is left empty or not specified) // "default_permission": "deny", // ///rule set for permissions allowing or denying access to key-expressions - // "rules": + // "rules": // [ // { // "actions": [ @@ -216,7 +216,7 @@ /// This option does not make LowLatency transport mandatory, the actual implementation of transport /// used will depend on Establish procedure and other party's settings /// - /// NOTE: Currently, the LowLatency transport doesn't preserve QoS prioritization. + /// NOTE: Currently, the LowLatency transport doesn't preserve QoS prioritization. /// NOTE: Due to the note above, 'lowlatency' is incompatible with 'qos' option, so in order to /// enable 'lowlatency' you need to explicitly disable 'qos'. lowlatency: false, @@ -225,19 +225,19 @@ enabled: true, }, /// Enables compression on unicast communications. - /// Compression capabilities are negotiated during session establishment. + /// Compression capabilities are negotiated during session establishment. /// If both Zenoh nodes support compression, then compression is activated. compression: { enabled: false, }, - }, + }, multicast: { - /// Enables QoS on multicast communication. + /// Enables QoS on multicast communication. /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. qos: { enabled: false, }, - /// Enables compression on multicast communication. + /// Enables compression on multicast communication. /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. compression: { enabled: false, @@ -261,7 +261,7 @@ /// messages will be sent at the configured time interval. /// NOTE: In order to consider eventual packet loss and transmission latency and jitter, /// set the actual keep_alive interval to one fourth of the lease time: i.e. send - /// 4 keep_alive messages in a lease period. Changing the lease time will have the + /// 4 keep_alive messages in a lease period. Changing the lease time will have the /// keep_alive messages sent more or less often. /// This is in-line with the ITU-T G.8013/Y.1731 specification on continous connectivity /// check which considers a link as failed when no messages are received in 3.5 times the @@ -299,9 +299,6 @@ /// Higher values lead to a more aggressive batching but it will introduce additional latency. 
backoff: 100, }, - // Number of threads dedicated to transmission - // By default, the number of threads is calculated as follows: 1 + ((#cores - 1) / 4) - // threads: 4, }, /// Configure the zenoh RX parameters of a link rx: { @@ -378,7 +375,7 @@ /// /// Plugins configurations /// - // + // // plugins_loading: { // // Enable plugins loading. // enabled: false, diff --git a/README.md b/README.md index fb268141a5..b09ea73d86 100644 --- a/README.md +++ b/README.md @@ -26,12 +26,12 @@ To install the latest release of the Zenoh router (`zenohd`) and its default plu ### Manual installation (all platforms) -All release packages can be downloaded from: - - https://download.eclipse.org/zenoh/zenoh/latest/ +All release packages can be downloaded from: + - https://download.eclipse.org/zenoh/zenoh/latest/ Each subdirectory has the name of the Rust target. See the platforms each target corresponds to on https://doc.rust-lang.org/stable/rustc/platform-support.html -Choose your platform and download the `.zip` file. +Choose your platform and download the `.zip` file. Unzip it where you want, and run the extracted `zenohd` binary. ### Linux Debian @@ -99,32 +99,32 @@ Zenoh's router is built as `target/release/zenohd`. All the examples are built i > **Windows users**: to properly execute the commands below in PowerShell you need to escape `"` characters as `\"`. - **put/store/get** - - run the Zenoh router with a memory storage: + - run the Zenoh router with a memory storage: `./target/release/zenohd --cfg='plugins/storage_manager/storages/demo:{key_expr:"demo/example/**",volume:"memory"}'` - in another shell run: `./target/release/examples/z_put` - then run `./target/release/examples/z_get` - the get should receive the stored publication. - **REST API using `curl` tool** - - run the Zenoh router with a memory storage: + - run the Zenoh router with a memory storage: `./target/release/zenohd --cfg='plugins/storage_manager/storages/demo:{key_expr:"demo/example/**",volume:"memory"}'` - - in another shell, do a publication via the REST API: + - in another shell, do a publication via the REST API: `curl -X PUT -d '"Hello World!"' http://localhost:8000/demo/example/test` - - get it back via the REST API: + - get it back via the REST API: `curl http://localhost:8000/demo/example/test` - **router admin space via the REST API** - - run the Zenoh router with permission to perform config changes via the admin space, and with a memory storage: + - run the Zenoh router with permission to perform config changes via the admin space, and with a memory storage: `./target/release/zenohd --adminspace-permissions=rw --cfg='plugins/storage_manager/storages/demo:{key_expr:"demo/example/**",volume:"memory"}'` - - in another shell, get info of the zenoh router via the zenoh admin space: + - in another shell, get info of the zenoh router via the zenoh admin space: `curl http://localhost:8000/@/router/local` - - get the volumes of the router (only memory by default): + - get the volumes of the router (only memory by default): `curl 'http://localhost:8000/@/router/local/**/volumes/*'` - - get the storages of the local router (the memory storage configured at startup on '/demo/example/**' should be present): + - get the storages of the local router (the memory storage configured at startup on '/demo/example/**' should be present): `curl 'http://localhost:8000/@/router/local/**/storages/*'` - - add another memory storage on `/demo/mystore/**`: + - add another memory storage on `/demo/mystore/**`: `curl -X PUT -H 
'content-type:application/json' -d '{"key_expr":"demo/mystore/**","volume":"memory"}' http://localhost:8000/@/router/local/config/plugins/storage_manager/storages/mystore` - - check it has been created: + - check it has been created: `curl 'http://localhost:8000/@/router/local/**/storages/*'` **Configuration options:** @@ -133,8 +133,12 @@ A Zenoh configuration file can be provided via CLI to all Zenoh examples and the * `-c, --config `: a [JSON5](https://json5.org) configuration file. [DEFAULT_CONFIG.json5](DEFAULT_CONFIG.json5) shows the schema of this file and the available options. + See other examples of Zenoh usage in [examples/](examples) +> [!NOTE] +> **Zenoh Runtime Configuration**: Starting from version 0.11.0-rc, Zenoh allows for configuring the number of worker threads and other advanced options of the runtime. For guidance on utilizing it, please refer to the [doc](https://docs.rs/zenoh-runtime/latest/zenoh_runtime/enum.ZRuntime.html). + ------------------------------- ## Zenoh router command line arguments `zenohd` accepts the following arguments: @@ -142,7 +146,7 @@ See other examples of Zenoh usage in [examples/](examples) * `--adminspace-permissions <[r|w|rw|none]>`: Configure the read and/or write permissions on the admin space. Default is read only. * `-c, --config `: a [JSON5](https://json5.org) configuration file. [DEFAULT_CONFIG.json5](DEFAULT_CONFIG.json5) shows the schema of this file. All properties of this configuration are optional, so you may not need such a large configuration for your use-case. * `--cfg :`: allows you to change specific parts of the configuration right after it has been constructed. VALUE must be a valid JSON5 value, and key must be a path through the configuration file, where each element is separated by a `/`. When inserting in parts of the config that are arrays, you may use indexes, or may use `+` to indicate that you want to append your value to the array. `--cfg` passed values will always override any previously existing value for their key in the configuration. - * `-l, --listen ...`: An endpoint on which this router will listen for incoming sessions. + * `-l, --listen ...`: An endpoint on which this router will listen for incoming sessions. Repeat this option to open several listeners. By default, `tcp/[::]:7447` is used. The following endpoints are currently supported: - TCP: `tcp/:` - UDP: `udp/:` @@ -184,8 +188,8 @@ Otherwise, incompatibilities in memory mapping of shared types between `zenohd` By default the Zenoh router is delivered or built with 2 plugins. These may be configured through a configuration file, or through individual changes to the configuration via the `--cfg` CLI option or via zenoh puts on individual parts of the configuration. -> [!WARNING] -> Since `v0.6`, `zenohd` no longer loads every available plugin at startup. Instead, only configured plugins are loaded (after processing `--cfg` and `--plugin` options). Once `zenohd` is running, plugins can be hot-loaded and, if they support it, reconfigured at runtime by editing their configuration through the adminspace. +> [!WARNING] +> Since `v0.6`, `zenohd` no longer loads every available plugin at startup. Instead, only configured plugins are loaded (after processing `--cfg` and `--plugin` options). Once `zenohd` is running, plugins can be hot-loaded and, if they support it, reconfigured at runtime by editing their configuration through the adminspace. Note that the REST plugin is added to the configuration by the default value of the `--rest-http-port` CLI argument. 
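The configuration file can also be loaded programmatically rather than passed on the command line. A minimal sketch, assuming a `Config::from_file` constructor (the exact constructor name is an assumption; check the `zenoh::config` docs) and the same session API used by the bundled examples:

    use zenoh::config::Config;

    #[tokio::main]
    async fn main() {
        // Load the JSON5 configuration file; `Config::from_file` is assumed here.
        let config = Config::from_file("DEFAULT_CONFIG.json5").unwrap();
        // Open a session exactly as the examples above do.
        let _session = zenoh::open(config).await.unwrap();
    }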
@@ -198,5 +202,5 @@ This plugin allows you to easily define storages. These will store key-value pai ------------------------------- ## Troubleshooting -In case of troubles, please first check on [this page](https://zenoh.io/docs/getting-started/troubleshooting/) if the trouble and cause are already known. +In case of troubles, please first check on [this page](https://zenoh.io/docs/getting-started/troubleshooting/) if the trouble and cause are already known. Otherwise, you can ask a question on the [zenoh Discord server](https://discord.gg/vSDSpqnbkm), or [create an issue](https://github.com/eclipse-zenoh/zenoh/issues). From 77654a0b16da29716faa311f3f8a4040b8338bf0 Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Fri, 3 May 2024 15:17:18 +0300 Subject: [PATCH 328/357] fix after merge --- examples/examples/z_alloc_shm.rs | 4 +-- examples/examples/z_get_shm.rs | 3 +- zenoh/src/lib.rs | 55 +++++++++++++++++--------------- 3 files changed, 33 insertions(+), 29 deletions(-) diff --git a/examples/examples/z_alloc_shm.rs b/examples/examples/z_alloc_shm.rs index a01de8d2fa..2db5e5a44e 100644 --- a/examples/examples/z_alloc_shm.rs +++ b/examples/examples/z_alloc_shm.rs @@ -40,14 +40,14 @@ async fn run() -> ZResult<()> { // This layout is reusable and can handle series of similar allocations let buffer_layout = { // OPTION 1: Simple (default) configuration: - let simple_layout = shared_memory_provider + let simple_layout = provider .alloc_layout() .size(512) .res() .unwrap(); // OPTION 2: Comprehensive configuration: - let _comprehensive_layout = shared_memory_provider + let _comprehensive_layout = provider .alloc_layout() .size(512) .alignment(AllocAlignment::new(2)) diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs index 7466f6eabc..39caf3a101 100644 --- a/examples/examples/z_get_shm.rs +++ b/examples/examples/z_get_shm.rs @@ -11,8 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::Parser; use std::time::Duration; + +use clap::Parser; use zenoh::prelude::*; use zenoh_examples::CommonArgs; diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index e6cf1e437a..caf961984b 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -378,32 +378,35 @@ pub mod internal { #[cfg(all(feature = "unstable", feature = "shared-memory"))] pub mod shm { - pub use zenoh_shm::api::buffer::{ - zshm::{zshm, ZShm}, - zshmmut::{zshmmut, ZShmMut}, - }; - pub use zenoh_shm::api::client::{ - shared_memory_client::SharedMemoryClient, shared_memory_segment::SharedMemorySegment, - }; - pub use zenoh_shm::api::client_storage::{SharedMemoryClientStorage, GLOBAL_CLIENT_STORAGE}; - pub use zenoh_shm::api::common::types::{ChunkID, ProtocolID, SegmentID}; - pub use zenoh_shm::api::protocol_implementations::posix::{ - posix_shared_memory_client::PosixSharedMemoryClient, - posix_shared_memory_provider_backend::{ - LayoutedPosixSharedMemoryProviderBackendBuilder, PosixSharedMemoryProviderBackend, - PosixSharedMemoryProviderBackendBuilder, + pub use zenoh_shm::api::{ + buffer::{ + zshm::{zshm, ZShm}, + zshmmut::{zshmmut, ZShmMut}, + }, + client::{ + shared_memory_client::SharedMemoryClient, shared_memory_segment::SharedMemorySegment, + }, + client_storage::{SharedMemoryClientStorage, GLOBAL_CLIENT_STORAGE}, + common::types::{ChunkID, ProtocolID, SegmentID}, + protocol_implementations::posix::{ + posix_shared_memory_client::PosixSharedMemoryClient, + posix_shared_memory_provider_backend::{ + LayoutedPosixSharedMemoryProviderBackendBuilder, PosixSharedMemoryProviderBackend, + 
PosixSharedMemoryProviderBackendBuilder, + }, + protocol_id::POSIX_PROTOCOL_ID, + }, + provider::{ + shared_memory_provider::{ + AllocBuilder, AllocLayout, AllocLayoutAlignedBuilder, AllocLayoutBuilder, + AllocLayoutSizedBuilder, AllocPolicy, AsyncAllocPolicy, BlockOn, DeallocEldest, + DeallocOptimal, DeallocYoungest, Deallocate, Defragment, DynamicProtocolID, + ForceDeallocPolicy, GarbageCollect, JustAlloc, ProtocolIDSource, + SharedMemoryProvider, SharedMemoryProviderBuilder, + SharedMemoryProviderBuilderBackendID, SharedMemoryProviderBuilderID, + StaticProtocolID, + }, + types::{AllocAlignment, BufAllocResult, ChunkAllocResult, MemoryLayout, ZAllocError}, }, - protocol_id::POSIX_PROTOCOL_ID, - }; - pub use zenoh_shm::api::provider::shared_memory_provider::{ - AllocBuilder, AllocLayout, AllocLayoutAlignedBuilder, AllocLayoutBuilder, - AllocLayoutSizedBuilder, AllocPolicy, AsyncAllocPolicy, BlockOn, DeallocEldest, - DeallocOptimal, DeallocYoungest, Deallocate, Defragment, DynamicProtocolID, - ForceDeallocPolicy, GarbageCollect, JustAlloc, ProtocolIDSource, SharedMemoryProvider, - SharedMemoryProviderBuilder, SharedMemoryProviderBuilderBackendID, - SharedMemoryProviderBuilderID, StaticProtocolID, - }; - pub use zenoh_shm::api::provider::types::{ - AllocAlignment, BufAllocResult, ChunkAllocResult, MemoryLayout, ZAllocError, }; } From 9fb1e1926139b6d297cb1d0cf347ea0d62a890d2 Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Fri, 3 May 2024 15:20:47 +0300 Subject: [PATCH 329/357] Update z_alloc_shm.rs --- examples/examples/z_alloc_shm.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/examples/examples/z_alloc_shm.rs b/examples/examples/z_alloc_shm.rs index 2db5e5a44e..93df5d821d 100644 --- a/examples/examples/z_alloc_shm.rs +++ b/examples/examples/z_alloc_shm.rs @@ -40,11 +40,7 @@ async fn run() -> ZResult<()> { // This layout is reusable and can handle series of similar allocations let buffer_layout = { // OPTION 1: Simple (default) configuration: - let simple_layout = provider - .alloc_layout() - .size(512) - .res() - .unwrap(); + let simple_layout = provider.alloc_layout().size(512).res().unwrap(); // OPTION 2: Comprehensive configuration: let _comprehensive_layout = provider From 511bc67abbb672a0e7edebd0fa54dfeeeb60f457 Mon Sep 17 00:00:00 2001 From: Dmitrii Bannov <104833606+yellowhatter@users.noreply.github.com> Date: Fri, 3 May 2024 16:14:28 +0300 Subject: [PATCH 330/357] Shm refine (#986) * [skip ci] SHM Payload API example and test * Add payload_mut to sample for zsliceshmmut deserialization * Improve SHM examples * Fix merge * Query/Reply shared memory examples * rename payload tests to bytes tests * - fix API exports - fix z_payload_shm example * Add attachment_mut to Sample * [skip ci] fix SHM exports in new api export mechanism * Massive renaming for ZSliceShm and ZSliceShmMut * fix ci * [skip ci] z_payload_shm -> z_bytes_shm * Polish SHM examples * fix lints * fix lint * fix after merge * Update z_alloc_shm.rs --------- Co-authored-by: Luca Cominardi --- .../src/api/{slice => buffer}/mod.rs | 4 +- .../src/api/{slice => buffer}/traits.rs | 0 .../{slice/zsliceshm.rs => buffer/zshm.rs} | 76 ++++----- .../zsliceshmmut.rs => buffer/zshmmut.rs} | 86 +++++------ commons/zenoh-shm/src/api/mod.rs | 2 +- .../api/provider/shared_memory_provider.rs | 12 +- commons/zenoh-shm/src/api/provider/types.rs | 4 +- examples/Cargo.toml | 22 ++- examples/examples/z_alloc_shm.rs | 40 ++--- examples/examples/z_bytes_shm.rs | 103 +++++++++++++ examples/examples/z_get_shm.rs | 144 
++++++++++++++++++ examples/examples/z_ping_shm.rs | 39 ++--- examples/examples/z_posix_shm_provider.rs | 44 ++++++ examples/examples/z_pub_shm.rs | 48 ++---- examples/examples/z_pub_shm_thr.rs | 37 ++--- examples/examples/z_queryable.rs | 7 +- examples/examples/z_queryable_shm.rs | 118 ++++++++++++++ examples/examples/z_sub_shm.rs | 39 +++-- zenoh/src/api/bytes.rs | 48 +++--- zenoh/src/api/encoding.rs | 6 +- zenoh/src/api/query.rs | 5 + zenoh/src/api/queryable.rs | 37 +++-- zenoh/src/api/sample.rs | 13 ++ zenoh/src/api/session.rs | 12 +- zenoh/src/lib.rs | 30 +++- zenoh/src/net/runtime/adminspace.rs | 6 +- zenoh/src/prelude.rs | 4 +- zenoh/tests/bytes.rs | 69 +++++++++ zenoh/tests/payload.rs | 86 ----------- 29 files changed, 784 insertions(+), 357 deletions(-) rename commons/zenoh-shm/src/api/{slice => buffer}/mod.rs (92%) rename commons/zenoh-shm/src/api/{slice => buffer}/traits.rs (100%) rename commons/zenoh-shm/src/api/{slice/zsliceshm.rs => buffer/zshm.rs} (59%) rename commons/zenoh-shm/src/api/{slice/zsliceshmmut.rs => buffer/zshmmut.rs} (59%) create mode 100644 examples/examples/z_bytes_shm.rs create mode 100644 examples/examples/z_get_shm.rs create mode 100644 examples/examples/z_posix_shm_provider.rs create mode 100644 examples/examples/z_queryable_shm.rs create mode 100644 zenoh/tests/bytes.rs delete mode 100644 zenoh/tests/payload.rs diff --git a/commons/zenoh-shm/src/api/slice/mod.rs b/commons/zenoh-shm/src/api/buffer/mod.rs similarity index 92% rename from commons/zenoh-shm/src/api/slice/mod.rs rename to commons/zenoh-shm/src/api/buffer/mod.rs index 59c793f94a..8a3e040da9 100644 --- a/commons/zenoh-shm/src/api/slice/mod.rs +++ b/commons/zenoh-shm/src/api/buffer/mod.rs @@ -13,5 +13,5 @@ // pub mod traits; -pub mod zsliceshm; -pub mod zsliceshmmut; +pub mod zshm; +pub mod zshmmut; diff --git a/commons/zenoh-shm/src/api/slice/traits.rs b/commons/zenoh-shm/src/api/buffer/traits.rs similarity index 100% rename from commons/zenoh-shm/src/api/slice/traits.rs rename to commons/zenoh-shm/src/api/buffer/traits.rs diff --git a/commons/zenoh-shm/src/api/slice/zsliceshm.rs b/commons/zenoh-shm/src/api/buffer/zshm.rs similarity index 59% rename from commons/zenoh-shm/src/api/slice/zsliceshm.rs rename to commons/zenoh-shm/src/api/buffer/zshm.rs index b2ba611b3c..d6f34f293a 100644 --- a/commons/zenoh-shm/src/api/slice/zsliceshm.rs +++ b/commons/zenoh-shm/src/api/buffer/zshm.rs @@ -20,44 +20,44 @@ use std::{ use zenoh_buffers::{ZBuf, ZSlice}; -use super::{traits::SHMBuf, zsliceshmmut::zsliceshmmut}; +use super::{traits::SHMBuf, zshmmut::zshmmut}; use crate::SharedMemoryBuf; -/// An immutable SHM slice +/// An immutable SHM buffer #[zenoh_macros::unstable_doc] #[repr(transparent)] #[derive(Clone, Debug, PartialEq, Eq)] -pub struct ZSliceShm(pub(crate) SharedMemoryBuf); +pub struct ZShm(pub(crate) SharedMemoryBuf); -impl SHMBuf for ZSliceShm { +impl SHMBuf for ZShm { fn is_valid(&self) -> bool { self.0.is_valid() } } -impl PartialEq<&zsliceshm> for ZSliceShm { - fn eq(&self, other: &&zsliceshm) -> bool { +impl PartialEq<&zshm> for ZShm { + fn eq(&self, other: &&zshm) -> bool { self.0 == other.0 .0 } } -impl Borrow for ZSliceShm { - fn borrow(&self) -> &zsliceshm { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] +impl Borrow for ZShm { + fn borrow(&self) -> &zshm { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } 
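// Usage sketch (illustration only, not part of this patch; written against the types
// defined in this file): `ZShm: Borrow<zshm>` together with `zshm: ToOwned<Owned = ZShm>`
// mirrors the `String`/`str` pairing, so an API can take `&zshm` and be called with
// either an owned `ZShm` or a borrowed `&zshm`. The function below is hypothetical.
fn payload_len(buf: &zshm) -> usize {
    // `zshm` derefs to `ZShm`, which in turn derefs to `[u8]`
    buf.len()
}
// Given `owned: ZShm`, callers can write `payload_len(owned.borrow())`
// (with `use std::borrow::Borrow`) or pass the `&zshm` obtained from a `ZBytes` payload.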
-impl BorrowMut for ZSliceShm { - fn borrow_mut(&mut self) -> &mut zsliceshm { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] +impl BorrowMut for ZShm { + fn borrow_mut(&mut self) -> &mut zshm { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } -impl Deref for ZSliceShm { +impl Deref for ZShm { type Target = [u8]; fn deref(&self) -> &Self::Target { @@ -65,37 +65,37 @@ impl Deref for ZSliceShm { } } -impl AsRef<[u8]> for ZSliceShm { +impl AsRef<[u8]> for ZShm { fn as_ref(&self) -> &[u8] { self } } -impl From for ZSliceShm { +impl From for ZShm { fn from(value: SharedMemoryBuf) -> Self { Self(value) } } -impl From for ZSlice { - fn from(value: ZSliceShm) -> Self { +impl From for ZSlice { + fn from(value: ZShm) -> Self { value.0.into() } } -impl From for ZBuf { - fn from(value: ZSliceShm) -> Self { +impl From for ZBuf { + fn from(value: ZShm) -> Self { value.0.into() } } -impl TryFrom<&mut ZSliceShm> for &mut zsliceshmmut { +impl TryFrom<&mut ZShm> for &mut zshmmut { type Error = (); - fn try_from(value: &mut ZSliceShm) -> Result { + fn try_from(value: &mut ZShm) -> Result { match value.0.is_unique() && value.0.is_valid() { true => { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction Ok(unsafe { core::mem::transmute(value) }) } @@ -104,64 +104,64 @@ impl TryFrom<&mut ZSliceShm> for &mut zsliceshmmut { } } -/// A borrowed immutable SHM slice +/// A borrowed immutable SHM buffer #[zenoh_macros::unstable_doc] #[derive(Debug, PartialEq, Eq)] #[allow(non_camel_case_types)] #[repr(transparent)] -pub struct zsliceshm(ZSliceShm); +pub struct zshm(ZShm); -impl ToOwned for zsliceshm { - type Owned = ZSliceShm; +impl ToOwned for zshm { + type Owned = ZShm; fn to_owned(&self) -> Self::Owned { self.0.clone() } } -impl PartialEq for &zsliceshm { - fn eq(&self, other: &ZSliceShm) -> bool { +impl PartialEq for &zshm { + fn eq(&self, other: &ZShm) -> bool { self.0 .0 == other.0 } } -impl Deref for zsliceshm { - type Target = ZSliceShm; +impl Deref for zshm { + type Target = ZShm; fn deref(&self) -> &Self::Target { &self.0 } } -impl DerefMut for zsliceshm { +impl DerefMut for zshm { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } -impl From<&SharedMemoryBuf> for &zsliceshm { +impl From<&SharedMemoryBuf> for &zshm { fn from(value: &SharedMemoryBuf) -> Self { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(value) } } } -impl From<&mut SharedMemoryBuf> for &mut zsliceshm { +impl From<&mut SharedMemoryBuf> for &mut zshm { fn from(value: &mut SharedMemoryBuf) -> Self { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(value) } } } -impl TryFrom<&mut zsliceshm> for &mut zsliceshmmut { +impl TryFrom<&mut zshm> for &mut zshmmut { type Error = (); - fn try_from(value: &mut zsliceshm) -> Result { + fn try_from(value: 
&mut zshm) -> Result { match value.0 .0.is_unique() && value.0 .0.is_valid() { true => { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction Ok(unsafe { core::mem::transmute(value) }) } diff --git a/commons/zenoh-shm/src/api/slice/zsliceshmmut.rs b/commons/zenoh-shm/src/api/buffer/zshmmut.rs similarity index 59% rename from commons/zenoh-shm/src/api/slice/zsliceshmmut.rs rename to commons/zenoh-shm/src/api/buffer/zshmmut.rs index d866e4173e..7341b7600c 100644 --- a/commons/zenoh-shm/src/api/slice/zsliceshmmut.rs +++ b/commons/zenoh-shm/src/api/buffer/zshmmut.rs @@ -19,37 +19,37 @@ use zenoh_buffers::{ZBuf, ZSlice}; use super::{ traits::{SHMBuf, SHMBufMut}, - zsliceshm::{zsliceshm, ZSliceShm}, + zshm::{zshm, ZShm}, }; use crate::SharedMemoryBuf; -/// A mutable SHM slice +/// A mutable SHM buffer #[zenoh_macros::unstable_doc] #[derive(Debug, PartialEq, Eq)] #[repr(transparent)] -pub struct ZSliceShmMut(SharedMemoryBuf); +pub struct ZShmMut(SharedMemoryBuf); -impl SHMBuf for ZSliceShmMut { +impl SHMBuf for ZShmMut { fn is_valid(&self) -> bool { self.0.is_valid() } } -impl SHMBufMut for ZSliceShmMut {} +impl SHMBufMut for ZShmMut {} -impl ZSliceShmMut { +impl ZShmMut { pub(crate) unsafe fn new_unchecked(data: SharedMemoryBuf) -> Self { Self(data) } } -impl PartialEq for &ZSliceShmMut { - fn eq(&self, other: &zsliceshmmut) -> bool { +impl PartialEq for &ZShmMut { + fn eq(&self, other: &zshmmut) -> bool { self.0 == other.0 .0 } } -impl TryFrom for ZSliceShmMut { +impl TryFrom for ZShmMut { type Error = SharedMemoryBuf; fn try_from(value: SharedMemoryBuf) -> Result { @@ -60,10 +60,10 @@ impl TryFrom for ZSliceShmMut { } } -impl TryFrom for ZSliceShmMut { - type Error = ZSliceShm; +impl TryFrom for ZShmMut { + type Error = ZShm; - fn try_from(value: ZSliceShm) -> Result { + fn try_from(value: ZShm) -> Result { match value.0.is_unique() && value.0.is_valid() { true => Ok(Self(value.0)), false => Err(value), @@ -71,39 +71,39 @@ impl TryFrom for ZSliceShmMut { } } -impl Borrow for ZSliceShmMut { - fn borrow(&self) -> &zsliceshm { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] +impl Borrow for ZShmMut { + fn borrow(&self) -> &zshm { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } -impl BorrowMut for ZSliceShmMut { - fn borrow_mut(&mut self) -> &mut zsliceshm { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] +impl BorrowMut for ZShmMut { + fn borrow_mut(&mut self) -> &mut zshm { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } -impl Borrow for ZSliceShmMut { - fn borrow(&self) -> &zsliceshmmut { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] +impl Borrow for ZShmMut { + fn borrow(&self) -> &zshmmut { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } -impl BorrowMut for ZSliceShmMut { - fn borrow_mut(&mut self) -> &mut zsliceshmmut { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and 
zsliceshmmut are #[repr(transparent)] +impl BorrowMut for ZShmMut { + fn borrow_mut(&mut self) -> &mut zshmmut { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction unsafe { core::mem::transmute(self) } } } -impl Deref for ZSliceShmMut { +impl Deref for ZShmMut { type Target = [u8]; fn deref(&self) -> &Self::Target { @@ -111,75 +111,75 @@ impl Deref for ZSliceShmMut { } } -impl DerefMut for ZSliceShmMut { +impl DerefMut for ZShmMut { fn deref_mut(&mut self) -> &mut Self::Target { self.0.as_mut() } } -impl AsRef<[u8]> for ZSliceShmMut { +impl AsRef<[u8]> for ZShmMut { fn as_ref(&self) -> &[u8] { self } } -impl AsMut<[u8]> for ZSliceShmMut { +impl AsMut<[u8]> for ZShmMut { fn as_mut(&mut self) -> &mut [u8] { self } } -impl From for ZSliceShm { - fn from(value: ZSliceShmMut) -> Self { +impl From for ZShm { + fn from(value: ZShmMut) -> Self { value.0.into() } } -impl From for ZSlice { - fn from(value: ZSliceShmMut) -> Self { +impl From for ZSlice { + fn from(value: ZShmMut) -> Self { value.0.into() } } -impl From for ZBuf { - fn from(value: ZSliceShmMut) -> Self { +impl From for ZBuf { + fn from(value: ZShmMut) -> Self { value.0.into() } } -/// A borrowed mutable SHM slice +/// A borrowed mutable SHM buffer #[zenoh_macros::unstable_doc] #[derive(Debug, PartialEq, Eq)] #[allow(non_camel_case_types)] #[repr(transparent)] -pub struct zsliceshmmut(ZSliceShmMut); +pub struct zshmmut(ZShmMut); -impl PartialEq for &zsliceshmmut { - fn eq(&self, other: &ZSliceShmMut) -> bool { +impl PartialEq for &zshmmut { + fn eq(&self, other: &ZShmMut) -> bool { self.0 .0 == other.0 } } -impl Deref for zsliceshmmut { - type Target = ZSliceShmMut; +impl Deref for zshmmut { + type Target = ZShmMut; fn deref(&self) -> &Self::Target { &self.0 } } -impl DerefMut for zsliceshmmut { +impl DerefMut for zshmmut { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } -impl TryFrom<&mut SharedMemoryBuf> for &mut zsliceshmmut { +impl TryFrom<&mut SharedMemoryBuf> for &mut zshmmut { type Error = (); fn try_from(value: &mut SharedMemoryBuf) -> Result { match value.is_unique() && value.is_valid() { - // SAFETY: ZSliceShm, ZSliceShmMut, zsliceshm and zsliceshmmut are #[repr(transparent)] + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] // to SharedMemoryBuf type, so it is safe to transmute them in any direction true => Ok(unsafe { core::mem::transmute(value) }), false => Err(()), diff --git a/commons/zenoh-shm/src/api/mod.rs b/commons/zenoh-shm/src/api/mod.rs index 08a5678fa8..a87188da29 100644 --- a/commons/zenoh-shm/src/api/mod.rs +++ b/commons/zenoh-shm/src/api/mod.rs @@ -12,9 +12,9 @@ // ZettaScale Zenoh Team, // +pub mod buffer; pub mod client; pub mod client_storage; pub mod common; pub mod protocol_implementations; pub mod provider; -pub mod slice; diff --git a/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs b/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs index 58109a699d..1ca560f07e 100644 --- a/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs +++ b/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs @@ -28,7 +28,7 @@ use super::{ types::{AllocAlignment, BufAllocResult, ChunkAllocResult, MemoryLayout, ZAllocError}, }; use crate::{ - api::{common::types::ProtocolID, slice::zsliceshmmut::ZSliceShmMut}, + api::{buffer::zshmmut::ZShmMut, common::types::ProtocolID}, header::{ allocated_descriptor::AllocatedHeaderDescriptor, descriptor::HeaderDescriptor, 
storage::GLOBAL_HEADER_STORAGE, @@ -712,11 +712,11 @@ where self.backend.defragment() } - /// Map externally-allocated chunk into ZSliceShmMut. + /// Map externally-allocated chunk into ZShmMut. /// This method is designed to be used with push data sources. /// Remember that chunk's len may be >= len! #[zenoh_macros::unstable_doc] - pub fn map(&self, chunk: AllocatedChunk, len: usize) -> ZResult { + pub fn map(&self, chunk: AllocatedChunk, len: usize) -> ZResult { // allocate resources for SHM buffer let (allocated_header, allocated_watchdog, confirmed_watchdog) = Self::alloc_resources()?; @@ -728,7 +728,7 @@ where allocated_watchdog, confirmed_watchdog, ); - Ok(unsafe { ZSliceShmMut::new_unchecked(wrapped) }) + Ok(unsafe { ZShmMut::new_unchecked(wrapped) }) } /// Try to collect free chunks. @@ -805,7 +805,7 @@ where allocated_watchdog, confirmed_watchdog, ); - Ok(unsafe { ZSliceShmMut::new_unchecked(wrapped) }) + Ok(unsafe { ZShmMut::new_unchecked(wrapped) }) } fn alloc_resources() -> ZResult<( @@ -910,6 +910,6 @@ where allocated_watchdog, confirmed_watchdog, ); - Ok(unsafe { ZSliceShmMut::new_unchecked(wrapped) }) + Ok(unsafe { ZShmMut::new_unchecked(wrapped) }) } } diff --git a/commons/zenoh-shm/src/api/provider/types.rs b/commons/zenoh-shm/src/api/provider/types.rs index ddf949ee75..beae24bfb7 100644 --- a/commons/zenoh-shm/src/api/provider/types.rs +++ b/commons/zenoh-shm/src/api/provider/types.rs @@ -17,7 +17,7 @@ use std::fmt::Display; use zenoh_result::{bail, ZResult}; use super::chunk::AllocatedChunk; -use crate::api::slice::zsliceshmmut::ZSliceShmMut; +use crate::api::buffer::zshmmut::ZShmMut; /// Allocation errors /// @@ -169,4 +169,4 @@ pub type ChunkAllocResult = Result; /// SHM buffer allocation result #[zenoh_macros::unstable_doc] -pub type BufAllocResult = Result; +pub type BufAllocResult = Result; diff --git a/examples/Cargo.toml b/examples/Cargo.toml index e117507ae9..90281ae558 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -100,6 +100,11 @@ path = "examples/z_pull.rs" name = "z_queryable" path = "examples/z_queryable.rs" +[[example]] +name = "z_queryable_shm" +path = "examples/z_queryable_shm.rs" +required-features = ["unstable", "shared-memory"] + [[example]] name = "z_storage" path = "examples/z_storage.rs" @@ -108,6 +113,11 @@ path = "examples/z_storage.rs" name = "z_get" path = "examples/z_get.rs" +[[example]] +name = "z_get_shm" +path = "examples/z_get_shm.rs" +required-features = ["unstable", "shared-memory"] + [[example]] name = "z_forward" path = "examples/z_forward.rs" @@ -156,4 +166,14 @@ path = "examples/z_pong.rs" [[example]] name = "z_alloc_shm" path = "examples/z_alloc_shm.rs" -required-features = ["unstable", "shared-memory"] \ No newline at end of file +required-features = ["unstable", "shared-memory"] + +[[example]] +name = "z_bytes_shm" +path = "examples/z_bytes_shm.rs" +required-features = ["unstable", "shared-memory"] + +[[example]] +name = "z_posix_shm_provider" +path = "examples/z_posix_shm_provider.rs" +required-features = ["unstable", "shared-memory"] diff --git a/examples/examples/z_alloc_shm.rs b/examples/examples/z_alloc_shm.rs index acff39379c..93df5d821d 100644 --- a/examples/examples/z_alloc_shm.rs +++ b/examples/examples/z_alloc_shm.rs @@ -21,29 +21,15 @@ async fn main() { } async fn run() -> ZResult<()> { - // Construct an SHM backend - let backend = { - // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. 
- // The initialisation of SHM backend is completely backend-specific and user is free to do - // anything reasonable here. This code is execuated at the provider's first use - - // Alignment for POSIX SHM provider - // All allocations will be aligned corresponding to this alignment - - // that means that the provider will be able to satisfy allocation layouts - // with alignment <= provider_alignment - let provider_alignment = AllocAlignment::default(); - - // Create layout for POSIX Provider's memory - let provider_layout = MemoryLayout::new(65536, provider_alignment).unwrap(); - - PosixSharedMemoryProviderBackend::builder() - .with_layout(provider_layout) - .res() - .unwrap() - }; - - // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID - let shared_memory_provider = SharedMemoryProviderBuilder::builder() + // create an SHM backend... + // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(65536) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); @@ -54,14 +40,10 @@ async fn run() -> ZResult<()> { // This layout is reusable and can handle series of similar allocations let buffer_layout = { // OPTION 1: Simple (default) configuration: - let simple_layout = shared_memory_provider - .alloc_layout() - .size(512) - .res() - .unwrap(); + let simple_layout = provider.alloc_layout().size(512).res().unwrap(); // OPTION 2: Comprehensive configuration: - let _comprehensive_layout = shared_memory_provider + let _comprehensive_layout = provider .alloc_layout() .size(512) .alignment(AllocAlignment::new(2)) diff --git a/examples/examples/z_bytes_shm.rs b/examples/examples/z_bytes_shm.rs new file mode 100644 index 0000000000..5c582e56e6 --- /dev/null +++ b/examples/examples/z_bytes_shm.rs @@ -0,0 +1,103 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use zenoh::{ + bytes::ZBytes, + shm::{ + zshm, zshmmut, PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, ZShm, + ZShmMut, POSIX_PROTOCOL_ID, + }, +}; + +fn main() { + // create an SHM backend... 
+ // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(4096) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + // Allocate an SHM buffer + // NOTE: For allocation API please check z_alloc_shm.rs example + // NOTE: For buf's API please check z_bytes_shm.rs example + let mut owned_shm_buf_mut = provider + .alloc_layout() + .size(1024) + .res() + .unwrap() + .alloc() + .res() + .unwrap(); + + // mutable and immutable API + let _data: &[u8] = &owned_shm_buf_mut; + let _data_mut: &mut [u8] = &mut owned_shm_buf_mut; + + // convert into immutable owned buffer (ZShmMut -> ZSlceShm) + let owned_shm_buf: ZShm = owned_shm_buf_mut.into(); + + // immutable API + let _data: &[u8] = &owned_shm_buf; + + // convert again into mutable owned buffer (ZShm -> ZSlceShmMut) + let mut owned_shm_buf_mut: ZShmMut = owned_shm_buf.try_into().unwrap(); + + // mutable and immutable API + let _data: &[u8] = &owned_shm_buf_mut; + let _data_mut: &mut [u8] = &mut owned_shm_buf_mut; + + // build a ZBytes from an SHM buffer (ZShmMut -> ZBytes) + let mut payload: ZBytes = owned_shm_buf_mut.into(); + + // branch to illustrate immutable access to SHM data + { + // deserialize ZBytes as an immutably borrowed zshm (ZBytes -> &zshm) + let borrowed_shm_buf: &zshm = payload.deserialize().unwrap(); + + // immutable API + let _data: &[u8] = borrowed_shm_buf; + + // construct owned buffer from borrowed type (&zshm -> ZShm) + let owned = borrowed_shm_buf.to_owned(); + + // immutable API + let _data: &[u8] = &owned; + + // try to construct mutable ZShmMut (ZShm -> ZShmMut) + let owned_mut: Result = owned.try_into(); + // the attempt fails because ZShm has two existing references ('owned' and inside 'payload') + assert!(owned_mut.is_err()) + } + + // branch to illustrate mutable access to SHM data + { + // deserialize ZBytes as mutably borrowed zshm (ZBytes -> &mut zshm) + let borrowed_shm_buf: &mut zshm = payload.deserialize_mut().unwrap(); + + // immutable API + let _data: &[u8] = borrowed_shm_buf; + + // convert zshm to zshmmut (&mut zshm -> &mut zshmmut) + let borrowed_shm_buf_mut: &mut zshmmut = borrowed_shm_buf.try_into().unwrap(); + + // mutable and immutable API + let _data: &[u8] = borrowed_shm_buf_mut; + let _data_mut: &mut [u8] = borrowed_shm_buf_mut; + } +} diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs new file mode 100644 index 0000000000..39caf3a101 --- /dev/null +++ b/examples/examples/z_get_shm.rs @@ -0,0 +1,144 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::time::Duration; + +use clap::Parser; +use zenoh::prelude::*; +use zenoh_examples::CommonArgs; + +const N: usize = 10; + +#[tokio::main] +async fn main() { + // initiate logging + zenoh_util::try_init_log_from_env(); + + let (mut config, selector, mut value, target, timeout) = parse_args(); + + // A probing procedure for shared memory is performed upon session opening. 
To enable `z_pub_shm` to operate + // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the + // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. + config.transport.shared_memory.set_enabled(true).unwrap(); + + println!("Opening session..."); + let session = zenoh::open(config).await.unwrap(); + + println!("Creating POSIX SHM provider..."); + // create an SHM backend... + // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(N * 1024) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + // Allocate an SHM buffer + // NOTE: For allocation API please check z_alloc_shm.rs example + // NOTE: For buf's API please check z_bytes_shm.rs example + println!("Allocating Shared Memory Buffer..."); + let mut sbuf = provider + .alloc_layout() + .size(1024) + .res() + .unwrap() + .alloc() + .with_policy::>() + .res_async() + .await + .unwrap(); + + let content = value + .take() + .unwrap_or_else(|| "Get from SharedMemory Rust!".to_string()); + sbuf[0..content.len()].copy_from_slice(content.as_bytes()); + + println!("Sending Query '{selector}'..."); + let replies = session + .get(&selector) + .value(sbuf) + .target(target) + .timeout(timeout) + .await + .unwrap(); + + while let Ok(reply) = replies.recv_async().await { + match reply.result() { + Ok(sample) => { + print!(">> Received ('{}': ", sample.key_expr().as_str()); + match sample.payload().deserialize::<&zshm>() { + Ok(payload) => println!("'{}')", String::from_utf8_lossy(payload),), + Err(e) => println!("'Not a SharedMemoryBuf: {:?}')", e), + } + } + Err(err) => { + let payload = err + .payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)); + println!(">> Received (ERROR: '{}')", payload); + } + } + } +} + +#[derive(clap::ValueEnum, Clone, Copy, Debug)] +#[value(rename_all = "SCREAMING_SNAKE_CASE")] +enum Qt { + BestMatching, + All, + AllComplete, +} + +#[derive(Parser, Clone, Debug)] +struct Args { + #[arg(short, long, default_value = "demo/example/**")] + /// The selection of resources to query + selector: Selector<'static>, + /// The value to publish. + value: Option, + #[arg(short, long, default_value = "BEST_MATCHING")] + /// The target queryables of the query. + target: Qt, + #[arg(short = 'o', long, default_value = "10000")] + /// The query timeout in milliseconds. + timeout: u64, + #[command(flatten)] + common: CommonArgs, +} + +fn parse_args() -> ( + Config, + Selector<'static>, + Option, + QueryTarget, + Duration, +) { + let args = Args::parse(); + ( + args.common.into(), + args.selector, + args.value, + match args.target { + Qt::BestMatching => QueryTarget::BestMatching, + Qt::All => QueryTarget::All, + Qt::AllComplete => QueryTarget::AllComplete, + }, + Duration::from_millis(args.timeout), + ) +} diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs index d4c5b4f162..4c3ad4ed40 100644 --- a/examples/examples/z_ping_shm.rs +++ b/examples/examples/z_ping_shm.rs @@ -45,34 +45,23 @@ fn main() { let mut samples = Vec::with_capacity(n); - // Construct an SHM backend - let backend = { - // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. 
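// Recap sketch (illustration only, not part of this patch): the two allocation flavors
// used by the new examples, combined in one place. All names come from the `zenoh::shm`
// re-exports shown earlier in this patch series; the sizes are arbitrary.
use zenoh::shm::{
    BlockOn, GarbageCollect, PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder,
    POSIX_PROTOCOL_ID,
};

async fn alloc_two_ways() {
    let backend = PosixSharedMemoryProviderBackend::builder()
        .with_size(4096)
        .unwrap()
        .res()
        .unwrap();
    let provider = SharedMemoryProviderBuilder::builder()
        .protocol_id::<POSIX_PROTOCOL_ID>()
        .backend(backend)
        .res();
    let layout = provider.alloc_layout().size(1024).res().unwrap();

    // 1) Default policy, synchronous resolution (as in z_ping_shm)
    let _buf = layout.alloc().res().unwrap();

    // 2) Block until memory can be reclaimed, asynchronous resolution
    //    (as in z_get_shm and z_queryable_shm)
    let _buf = layout
        .alloc()
        .with_policy::<BlockOn<GarbageCollect>>()
        .res_async()
        .await
        .unwrap();
}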
- // The initialisation of SHM backend is completely backend-specific and user is free to do - // anything reasonable here. This code is execuated at the provider's first use - - // Alignment for POSIX SHM provider - // All allocations will be aligned corresponding to this alignment - - // that means that the provider will be able to satisfy allocation layouts - // with alignment <= provider_alignment - let provider_alignment = AllocAlignment::default(); - - // Create layout for POSIX Provider's memory - let provider_layout = MemoryLayout::new(size, provider_alignment).unwrap(); - - PosixSharedMemoryProviderBackend::builder() - .with_layout(provider_layout) - .res() - .unwrap() - }; - - // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID - let shared_memory_provider = SharedMemoryProviderBuilder::builder() + // create an SHM backend... + // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(size) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); - let buf = shared_memory_provider + // Allocate an SHM buffer + // NOTE: For allocation API please check z_alloc_shm.rs example + // NOTE: For buf's API please check z_bytes_shm.rs example + let buf = provider .alloc_layout() .size(size) .res() @@ -81,7 +70,7 @@ fn main() { .res() .unwrap(); - // convert ZSliceShmMut into ZSlice as ZSliceShmMut does not support Clone + // convert ZShmMut into ZSlice as ZShmMut does not support Clone let buf: ZSlice = buf.into(); // -- warmup -- diff --git a/examples/examples/z_posix_shm_provider.rs b/examples/examples/z_posix_shm_provider.rs new file mode 100644 index 0000000000..cdf502bc61 --- /dev/null +++ b/examples/examples/z_posix_shm_provider.rs @@ -0,0 +1,44 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use zenoh::prelude::*; + +fn main() { + // Construct an SHM backend + let backend = { + // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. 
+ + // Total amount of shared memory to allocate + let size = 4096; + + // An alignment for POSIX SHM provider + // Due to internal optimization, all allocations will be aligned corresponding to this alignment, + // so the provider will be able to satisfy allocation layouts with alignment <= provider_alignment + let provider_alignment = AllocAlignment::default(); + + // A layout for POSIX Provider's memory + let provider_layout = MemoryLayout::new(size, provider_alignment).unwrap(); + + // Build a provider backend + PosixSharedMemoryProviderBackend::builder() + .with_layout(provider_layout) + .res() + .unwrap() + }; + + // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID + let _shared_memory_provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); +} diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index 92d19b6b06..d2a87a59cc 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -16,7 +16,6 @@ use zenoh::prelude::*; use zenoh_examples::CommonArgs; const N: usize = 10; -const K: u32 = 3; #[tokio::main] async fn main() -> Result<(), ZError> { @@ -33,46 +32,31 @@ async fn main() -> Result<(), ZError> { println!("Opening session..."); let session = zenoh::open(config).await.unwrap(); - println!("Creating POSIX SHM backend..."); - // Construct an SHM backend - let backend = { - // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. - // The initialisation of SHM backend is completely backend-specific and user is free to do - // anything reasonable here. This code is execuated at the provider's first use - - // Alignment for POSIX SHM provider - // All allocations will be aligned corresponding to this alignment - - // that means that the provider will be able to satisfy allocation layouts - // with alignment <= provider_alignment - let provider_alignment = AllocAlignment::default(); - - // Create layout for POSIX Provider's memory - let provider_layout = MemoryLayout::new(N * 1024, provider_alignment).unwrap(); - - PosixSharedMemoryProviderBackend::builder() - .with_layout(provider_layout) - .res() - .unwrap() - }; - - println!("Creating SHM Provider with POSIX backend..."); - // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID - let shared_memory_provider = SharedMemoryProviderBuilder::builder() + println!("Creating POSIX SHM provider..."); + // create an SHM backend... 
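// Equivalence note (assumption, not stated by this patch): judging from the extended
// builder shown in z_posix_shm_provider.rs, the simplified `.with_size(N * 1024)` call
// used below appears to be shorthand for building the backend from an explicit layout
// with a default alignment, roughly:
fn build_backend_with_explicit_layout() {
    // Relies on the same `N` constant and `zenoh::prelude::*` import as this example.
    let layout = MemoryLayout::new(N * 1024, AllocAlignment::default()).unwrap();
    let _backend = PosixSharedMemoryProviderBackend::builder()
        .with_layout(layout)
        .res()
        .unwrap();
}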
+ // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(N * 1024) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); let publisher = session.declare_publisher(&path).await.unwrap(); + // Create allocation layout for series of similar allocations println!("Allocating Shared Memory Buffer..."); - let layout = shared_memory_provider - .alloc_layout() - .size(1024) - .res() - .unwrap(); + let layout = provider.alloc_layout().size(1024).res().unwrap(); println!("Press CTRL-C to quit..."); - for idx in 0..(K * N as u32) { + for idx in 0..u32::MAX { + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + + // Allocate particular SHM buffer using pre-created layout let mut sbuf = layout .alloc() .with_policy::>() diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index 0b94304321..0d44fbe6ee 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -28,34 +28,23 @@ async fn main() { let z = zenoh::open(config).await.unwrap(); - // Construct an SHM backend - let backend = { - // NOTE: code in this block is a specific PosixSharedMemoryProviderBackend API. - // The initialisation of SHM backend is completely backend-specific and user is free to do - // anything reasonable here. This code is execuated at the provider's first use - - // Alignment for POSIX SHM provider - // All allocations will be aligned corresponding to this alignment - - // that means that the provider will be able to satisfy allocation layouts - // with alignment <= provider_alignment - let provider_alignment = AllocAlignment::default(); - - // Create layout for POSIX Provider's memory - let provider_layout = MemoryLayout::new(sm_size, provider_alignment).unwrap(); - - PosixSharedMemoryProviderBackend::builder() - .with_layout(provider_layout) - .res() - .unwrap() - }; - - // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID - let shared_memory_provider = SharedMemoryProviderBuilder::builder() + // create an SHM backend... + // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(sm_size) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() .protocol_id::() .backend(backend) .res(); - let mut buf = shared_memory_provider + // Allocate an SHM buffer + // NOTE: For allocation API please check z_alloc_shm.rs example + // NOTE: For buf's API please check z_bytes_shm.rs example + let mut buf = provider .alloc_layout() .size(size) .res() diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index e24b8e80cb..dcdca82c09 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -20,7 +20,12 @@ async fn main() { // initiate logging zenoh_util::try_init_log_from_env(); - let (config, key_expr, value, complete) = parse_args(); + let (mut config, key_expr, value, complete) = parse_args(); + + // A probing procedure for shared memory is performed upon session opening. To enable `z_get_shm` to operate + // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the + // subscriber side. 
By doing so, the probing procedure will succeed and shared memory will operate as expected. + config.transport.shared_memory.set_enabled(true).unwrap(); println!("Opening session..."); let session = zenoh::open(config).await.unwrap(); diff --git a/examples/examples/z_queryable_shm.rs b/examples/examples/z_queryable_shm.rs new file mode 100644 index 0000000000..ed2320d2c5 --- /dev/null +++ b/examples/examples/z_queryable_shm.rs @@ -0,0 +1,118 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use clap::Parser; +use zenoh::prelude::*; +use zenoh_examples::CommonArgs; + +const N: usize = 10; + +#[tokio::main] +async fn main() { + // initiate logging + zenoh_util::try_init_log_from_env(); + + let (mut config, key_expr, value, complete) = parse_args(); + + // A probing procedure for shared memory is performed upon session opening. To enable `z_get_shm` to operate + // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the + // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. + config.transport.shared_memory.set_enabled(true).unwrap(); + + println!("Opening session..."); + let session = zenoh::open(config).await.unwrap(); + + println!("Creating POSIX SHM provider..."); + // create an SHM backend... + // NOTE: For extended PosixSharedMemoryProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(N * 1024) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + println!("Declaring Queryable on '{key_expr}'..."); + let queryable = session + .declare_queryable(&key_expr) + .complete(complete) + .await + .unwrap(); + + println!("Press CTRL-C to quit..."); + while let Ok(query) = queryable.recv_async().await { + print!( + ">> [Queryable] Received Query '{}' ('{}'", + query.selector(), + query.key_expr().as_str(), + ); + if let Some(payload) = query.payload() { + match payload.deserialize::<&zshm>() { + Ok(payload) => print!(": '{}'", String::from_utf8_lossy(payload)), + Err(e) => print!(": 'Not a SharedMemoryBuf: {:?}'", e), + } + } + println!(")"); + + // Allocate an SHM buffer + // NOTE: For allocation API please check z_alloc_shm.rs example + // NOTE: For buf's API please check z_bytes_shm.rs example + println!("Allocating Shared Memory Buffer..."); + let mut sbuf = provider + .alloc_layout() + .size(1024) + .res() + .unwrap() + .alloc() + .with_policy::>() + .res_async() + .await + .unwrap(); + + sbuf[0..value.len()].copy_from_slice(value.as_bytes()); + + println!( + ">> [Queryable] Responding ('{}': '{}')", + key_expr.as_str(), + value, + ); + query + .reply(key_expr.clone(), sbuf) + .await + .unwrap_or_else(|e| println!(">> [Queryable ] Error sending reply: {e}")); + } +} + +#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] +struct Args { + #[arg(short, long, default_value = "demo/example/zenoh-rs-queryable")] + /// The key expression matching queries to reply to. 
+ key: KeyExpr<'static>, + #[arg(short, long, default_value = "Queryable from SharedMemory Rust!")] + /// The value to reply to queries. + value: String, + #[arg(long)] + /// Declare the queryable as complete w.r.t. the key expression. + complete: bool, + #[command(flatten)] + common: CommonArgs, +} + +fn parse_args() -> (Config, KeyExpr<'static>, String, bool) { + let args = Args::parse(); + (args.common.into(), args.key, args.value, args.complete) +} diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs index 9914539ed5..bab31d4a2a 100644 --- a/examples/examples/z_sub_shm.rs +++ b/examples/examples/z_sub_shm.rs @@ -35,18 +35,37 @@ async fn main() { println!("Press CTRL-C to quit..."); while let Ok(sample) = subscriber.recv_async().await { - match sample.payload().deserialize::<&zsliceshm>() { - Ok(payload) => println!( - ">> [Subscriber] Received {} ('{}': '{:02x?}')", - sample.kind(), - sample.key_expr().as_str(), - payload - ), - Err(e) => { - println!(">> [Subscriber] Not a SharedMemoryBuf: {:?}", e); - } + print!( + ">> [Subscriber] Received {} ('{}': ", + sample.kind(), + sample.key_expr().as_str(), + ); + match sample.payload().deserialize::<&zshm>() { + Ok(payload) => print!("'{}'", String::from_utf8_lossy(payload)), + Err(e) => print!("'Not a SharedMemoryBuf: {:?}'", e), } + println!(")"); } + + // // Try to get a mutable reference to the SHM buffer. If this subscriber is the only subscriber + // // holding a reference to the SHM buffer, then it will be able to get a mutable reference to it. + // // With the mutable reference at hand, it's possible to mutate in place the SHM buffer content. + // + // use zenoh::shm::zshmmut; + + // while let Ok(mut sample) = subscriber.recv_async().await { + // let kind = sample.kind(); + // let key_expr = sample.key_expr().to_string(); + // match sample.payload_mut().deserialize_mut::<&mut zshmmut>() { + // Ok(payload) => println!( + // ">> [Subscriber] Received {} ('{}': '{:02x?}')", + // kind, key_expr, payload + // ), + // Err(e) => { + // println!(">> [Subscriber] Not a SharedMemoryBuf: {:?}", e); + // } + // } + // } } #[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index fb32910b54..98afd1a3c3 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -30,9 +30,9 @@ use zenoh_protocol::{core::Properties, zenoh::ext::AttachmentType}; use zenoh_result::{ZError, ZResult}; #[cfg(all(feature = "shared-memory", feature = "unstable"))] use zenoh_shm::{ - api::slice::{ - zsliceshm::{zsliceshm, ZSliceShm}, - zsliceshmmut::{zsliceshmmut, ZSliceShmMut}, + api::buffer::{ + zshm::{zshm, ZShm}, + zshmmut::{zshmmut, ZShmMut}, }, SharedMemoryBuf, }; @@ -1526,47 +1526,47 @@ impl TryFrom<&mut ZBytes> for serde_pickle::Value { // Shared memory conversion #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl Serialize for ZSerde { +impl Serialize for ZSerde { type Output = ZBytes; - fn serialize(self, t: ZSliceShm) -> Self::Output { + fn serialize(self, t: ZShm) -> Self::Output { let slice: ZSlice = t.into(); ZBytes::new(slice) } } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl From for ZBytes { - fn from(t: ZSliceShm) -> Self { +impl From for ZBytes { + fn from(t: ZShm) -> Self { ZSerde.serialize(t) } } // Shared memory conversion #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl Serialize for ZSerde { +impl Serialize for ZSerde { type Output = ZBytes; - fn serialize(self, t: ZSliceShmMut) -> Self::Output { 
+ fn serialize(self, t: ZShmMut) -> Self::Output { let slice: ZSlice = t.into(); ZBytes::new(slice) } } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl From for ZBytes { - fn from(t: ZSliceShmMut) -> Self { +impl From for ZBytes { + fn from(t: ZShmMut) -> Self { ZSerde.serialize(t) } } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> Deserialize<'a, &'a zsliceshm> for ZSerde { +impl<'a> Deserialize<'a, &'a zshm> for ZSerde { type Input = &'a ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: Self::Input) -> Result<&'a zsliceshm, Self::Error> { - // A ZSliceShm is expected to have only one slice + fn deserialize(self, v: Self::Input) -> Result<&'a zshm, Self::Error> { + // A ZShm is expected to have only one slice let mut zslices = v.0.zslices(); if let Some(zs) = zslices.next() { if let Some(shmb) = zs.downcast_ref::() { @@ -1578,7 +1578,7 @@ impl<'a> Deserialize<'a, &'a zsliceshm> for ZSerde { } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> TryFrom<&'a ZBytes> for &'a zsliceshm { +impl<'a> TryFrom<&'a ZBytes> for &'a zshm { type Error = ZDeserializeError; fn try_from(value: &'a ZBytes) -> Result { @@ -1587,7 +1587,7 @@ impl<'a> TryFrom<&'a ZBytes> for &'a zsliceshm { } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zsliceshm { +impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zshm { type Error = ZDeserializeError; fn try_from(value: &'a mut ZBytes) -> Result { @@ -1596,11 +1596,11 @@ impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zsliceshm { } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> Deserialize<'a, &'a mut zsliceshm> for ZSerde { +impl<'a> Deserialize<'a, &'a mut zshm> for ZSerde { type Input = &'a mut ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: Self::Input) -> Result<&'a mut zsliceshm, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result<&'a mut zshm, Self::Error> { // A ZSliceShmBorrowMut is expected to have only one slice let mut zslices = v.0.zslices_mut(); if let Some(zs) = zslices.next() { @@ -1613,11 +1613,11 @@ impl<'a> Deserialize<'a, &'a mut zsliceshm> for ZSerde { } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> Deserialize<'a, &'a mut zsliceshmmut> for ZSerde { +impl<'a> Deserialize<'a, &'a mut zshmmut> for ZSerde { type Input = &'a mut ZBytes; type Error = ZDeserializeError; - fn deserialize(self, v: Self::Input) -> Result<&'a mut zsliceshmmut, Self::Error> { + fn deserialize(self, v: Self::Input) -> Result<&'a mut zshmmut, Self::Error> { // A ZSliceShmBorrowMut is expected to have only one slice let mut zslices = v.0.zslices_mut(); if let Some(zs) = zslices.next() { @@ -1630,7 +1630,7 @@ impl<'a> Deserialize<'a, &'a mut zsliceshmmut> for ZSerde { } #[cfg(all(feature = "shared-memory", feature = "unstable"))] -impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zsliceshmmut { +impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zshmmut { type Error = ZDeserializeError; fn try_from(value: &'a mut ZBytes) -> Result { @@ -1834,12 +1834,12 @@ mod tests { use zenoh_protocol::core::Properties; #[cfg(all(feature = "shared-memory", feature = "unstable"))] use zenoh_shm::api::{ + buffer::zshm::{zshm, ZShm}, protocol_implementations::posix::{ posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, protocol_id::POSIX_PROTOCOL_ID, }, provider::shared_memory_provider::SharedMemoryProviderBuilder, - slice::zsliceshm::{zsliceshm, ZSliceShm}, }; use super::ZBytes; @@ 
-1967,9 +1967,9 @@ mod tests { let mutable_shm_buf = layout.alloc().res().unwrap(); // convert to immutable SHM buffer - let immutable_shm_buf: ZSliceShm = mutable_shm_buf.into(); + let immutable_shm_buf: ZShm = mutable_shm_buf.into(); - serialize_deserialize!(&zsliceshm, immutable_shm_buf); + serialize_deserialize!(&zshm, immutable_shm_buf); } // Properties diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs index f1be92c7ac..29c65f837e 100644 --- a/zenoh/src/api/encoding.rs +++ b/zenoh/src/api/encoding.rs @@ -17,7 +17,7 @@ use phf::phf_map; use zenoh_buffers::{ZBuf, ZSlice}; use zenoh_protocol::core::EncodingId; #[cfg(feature = "shared-memory")] -use zenoh_shm::api::slice::{zsliceshm::ZSliceShm, zsliceshmmut::ZSliceShmMut}; +use zenoh_shm::api::buffer::{zshm::ZShm, zshmmut::ZShmMut}; use super::bytes::ZBytes; @@ -837,10 +837,10 @@ impl EncodingMapping for serde_pickle::Value { // - Zenoh SHM #[cfg(feature = "shared-memory")] -impl EncodingMapping for ZSliceShm { +impl EncodingMapping for ZShm { const ENCODING: Encoding = Encoding::ZENOH_BYTES; } #[cfg(feature = "shared-memory")] -impl EncodingMapping for ZSliceShmMut { +impl EncodingMapping for ZShmMut { const ENCODING: Encoding = Encoding::ZENOH_BYTES; } diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs index e344237087..66de2e5700 100644 --- a/zenoh/src/api/query.rs +++ b/zenoh/src/api/query.rs @@ -93,6 +93,11 @@ impl Reply { self.result.as_ref() } + /// Gets the a mutable borrowed result of this `Reply`. Use [`Reply::into_result`] to take ownership of the result. + pub fn result_mut(&mut self) -> Result<&mut Sample, &mut Value> { + self.result.as_mut() + } + /// Converts this `Reply` into the its result. Use [`Reply::result`] it you don't want to take ownership. pub fn into_result(self) -> Result { self.result diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs index e2343811db..0653c4433d 100644 --- a/zenoh/src/api/queryable.rs +++ b/zenoh/src/api/queryable.rs @@ -51,18 +51,11 @@ use super::{ use crate::net::primitives::Primitives; pub(crate) struct QueryInner { - /// The key expression of this Query. pub(crate) key_expr: KeyExpr<'static>, - /// This Query's selector parameters. pub(crate) parameters: Parameters<'static>, - /// This Query's body. - pub(crate) value: Option, - pub(crate) qid: RequestId, pub(crate) zid: ZenohId, pub(crate) primitives: Arc, - #[cfg(feature = "unstable")] - pub(crate) attachment: Option, } impl Drop for QueryInner { @@ -80,6 +73,9 @@ impl Drop for QueryInner { pub struct Query { pub(crate) inner: Arc, pub(crate) eid: EntityId, + pub(crate) value: Option, + #[cfg(feature = "unstable")] + pub(crate) attachment: Option, } impl Query { @@ -107,24 +103,43 @@ impl Query { /// This Query's value. #[inline(always)] pub fn value(&self) -> Option<&Value> { - self.inner.value.as_ref() + self.value.as_ref() + } + + /// This Query's value. + #[inline(always)] + pub fn value_mut(&mut self) -> Option<&mut Value> { + self.value.as_mut() } /// This Query's payload. #[inline(always)] pub fn payload(&self) -> Option<&ZBytes> { - self.inner.value.as_ref().map(|v| &v.payload) + self.value.as_ref().map(|v| &v.payload) + } + + /// This Query's payload. + #[inline(always)] + pub fn payload_mut(&mut self) -> Option<&mut ZBytes> { + self.value.as_mut().map(|v| &mut v.payload) } /// This Query's encoding. 
#[inline(always)] pub fn encoding(&self) -> Option<&Encoding> { - self.inner.value.as_ref().map(|v| &v.encoding) + self.value.as_ref().map(|v| &v.encoding) } + /// This Query's attachment. #[zenoh_macros::unstable] pub fn attachment(&self) -> Option<&ZBytes> { - self.inner.attachment.as_ref() + self.attachment.as_ref() + } + + /// This Query's attachment. + #[zenoh_macros::unstable] + pub fn attachment_mut(&mut self) -> Option<&mut ZBytes> { + self.attachment.as_mut() } /// Sends a reply in the form of [`Sample`] to this Query. diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs index 2551a2a0d9..f70f024677 100644 --- a/zenoh/src/api/sample.rs +++ b/zenoh/src/api/sample.rs @@ -306,6 +306,12 @@ impl Sample { &self.payload } + /// Gets the payload of this Sample. + #[inline] + pub fn payload_mut(&mut self) -> &mut ZBytes { + &mut self.payload + } + /// Gets the kind of this Sample. #[inline] pub fn kind(&self) -> SampleKind { @@ -352,6 +358,13 @@ impl Sample { pub fn attachment(&self) -> Option<&ZBytes> { self.attachment.as_ref() } + + /// Gets the sample attachment: a map of key-value pairs, where each key and value are byte-slices. + #[zenoh_macros::unstable] + #[inline] + pub fn attachment_mut(&mut self) -> Option<&mut ZBytes> { + self.attachment.as_mut() + } } impl From for Value { diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 2e718ecccb..018a3a085e 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -1806,10 +1806,6 @@ impl Session { let query_inner = Arc::new(QueryInner { key_expr, parameters: parameters.to_owned().into(), - value: body.map(|b| Value { - payload: b.payload.into(), - encoding: b.encoding.into(), - }), qid, zid, primitives: if local { @@ -1817,13 +1813,17 @@ impl Session { } else { primitives }, - #[cfg(feature = "unstable")] - attachment, }); for (eid, callback) in queryables { callback(Query { inner: query_inner.clone(), eid, + value: body.as_ref().map(|b| Value { + payload: b.payload.clone().into(), + encoding: b.encoding.clone().into(), + }), + #[cfg(feature = "unstable")] + attachment: attachment.clone(), }); } } diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 58e17fc2ea..caf961984b 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -379,20 +379,34 @@ pub mod internal { #[cfg(all(feature = "unstable", feature = "shared-memory"))] pub mod shm { pub use zenoh_shm::api::{ - client_storage::SharedMemoryClientStorage, + buffer::{ + zshm::{zshm, ZShm}, + zshmmut::{zshmmut, ZShmMut}, + }, + client::{ + shared_memory_client::SharedMemoryClient, shared_memory_segment::SharedMemorySegment, + }, + client_storage::{SharedMemoryClientStorage, GLOBAL_CLIENT_STORAGE}, + common::types::{ChunkID, ProtocolID, SegmentID}, protocol_implementations::posix::{ - posix_shared_memory_provider_backend::PosixSharedMemoryProviderBackend, + posix_shared_memory_client::PosixSharedMemoryClient, + posix_shared_memory_provider_backend::{ + LayoutedPosixSharedMemoryProviderBackendBuilder, PosixSharedMemoryProviderBackend, + PosixSharedMemoryProviderBackendBuilder, + }, protocol_id::POSIX_PROTOCOL_ID, }, provider::{ shared_memory_provider::{ - BlockOn, Deallocate, Defragment, GarbageCollect, SharedMemoryProviderBuilder, + AllocBuilder, AllocLayout, AllocLayoutAlignedBuilder, AllocLayoutBuilder, + AllocLayoutSizedBuilder, AllocPolicy, AsyncAllocPolicy, BlockOn, DeallocEldest, + DeallocOptimal, DeallocYoungest, Deallocate, Defragment, DynamicProtocolID, + ForceDeallocPolicy, GarbageCollect, JustAlloc, ProtocolIDSource, + 
SharedMemoryProvider, SharedMemoryProviderBuilder, + SharedMemoryProviderBuilderBackendID, SharedMemoryProviderBuilderID, + StaticProtocolID, }, - types::{AllocAlignment, MemoryLayout}, - }, - slice::{ - zsliceshm::{zsliceshm, ZSliceShm}, - zsliceshmmut::{zsliceshmmut, ZSliceShmMut}, + types::{AllocAlignment, BufAllocResult, ChunkAllocResult, MemoryLayout, ZAllocError}, }, }; } diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 8b53692ead..62f6b7c8b4 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -466,14 +466,14 @@ impl Primitives for AdminSpace { inner: Arc::new(QueryInner { key_expr: key_expr.clone(), parameters: query.parameters.into(), - value: query.ext_body.map(|b| Value::new(b.payload, b.encoding)), qid: msg.id, zid, primitives, - #[cfg(feature = "unstable")] - attachment: query.ext_attachment.map(Into::into), }), eid: self.queryable_id, + value: query.ext_body.map(|b| Value::new(b.payload, b.encoding)), + #[cfg(feature = "unstable")] + attachment: query.ext_attachment.map(Into::into), }; for (key, handler) in &self.handlers { diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 54418d9f78..2ed94e6f47 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -26,7 +26,7 @@ // Reexport API in flat namespace pub(crate) mod flat { - #[cfg(feature = "shared-memory")] + #[cfg(all(feature = "unstable", feature = "shared-memory"))] pub use crate::shm::*; pub use crate::{ buffers::*, @@ -51,7 +51,7 @@ pub(crate) mod flat { // Reexport API in hierarchical namespace pub(crate) mod mods { - #[cfg(feature = "shared-memory")] + #[cfg(all(feature = "unstable", feature = "shared-memory"))] pub use crate::shm; pub use crate::{ buffers, bytes, config, core, encoding, handlers, key_expr, publication, query, queryable, diff --git a/zenoh/tests/bytes.rs b/zenoh/tests/bytes.rs new file mode 100644 index 0000000000..6de12ab63f --- /dev/null +++ b/zenoh/tests/bytes.rs @@ -0,0 +1,69 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +#[test] +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +fn shm_bytes_single_buf() { + use zenoh::prelude::*; + + // create an SHM backend... 
+ let backend = PosixSharedMemoryProviderBackend::builder() + .with_size(4096) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = SharedMemoryProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + // Prepare a layout for allocations + let layout = provider.alloc_layout().size(1024).res().unwrap(); + + // allocate an SHM buffer (ZShmMut) + let owned_shm_buf_mut = layout.alloc().res().unwrap(); + + // convert into immutable owned buffer (ZShmMut -> ZSlceShm) + let owned_shm_buf: ZShm = owned_shm_buf_mut.into(); + + // convert again into mutable owned buffer (ZShm -> ZSlceShmMut) + let owned_shm_buf_mut: ZShmMut = owned_shm_buf.try_into().unwrap(); + + // build a ZBytes from an SHM buffer (ZShmMut -> ZBytes) + let mut payload: ZBytes = owned_shm_buf_mut.into(); + + // branch to illustrate immutable access to SHM data + { + // deserialize ZBytes as an immutably borrowed zshm (ZBytes -> &zshm) + let borrowed_shm_buf: &zshm = payload.deserialize().unwrap(); + + // construct owned buffer from borrowed type (&zshm -> ZShm) + let owned = borrowed_shm_buf.to_owned(); + + // try to construct mutable ZShmMut (ZShm -> ZShmMut) + let owned_mut: Result = owned.try_into(); + // the attempt fails because ZShm has two existing references ('owned' and inside 'payload') + assert!(owned_mut.is_err()) + } + + // branch to illustrate mutable access to SHM data + { + // deserialize ZBytes as mutably borrowed zshm (ZBytes -> &mut zshm) + let borrowed_shm_buf: &mut zshm = payload.deserialize_mut().unwrap(); + + // convert zshm to zshmmut (&mut zshm -> &mut zshmmut) + let _borrowed_shm_buf_mut: &mut zshmmut = borrowed_shm_buf.try_into().unwrap(); + } +} diff --git a/zenoh/tests/payload.rs b/zenoh/tests/payload.rs deleted file mode 100644 index fecf10a608..0000000000 --- a/zenoh/tests/payload.rs +++ /dev/null @@ -1,86 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// - -#[test] -#[cfg(all(feature = "shared-memory", feature = "unstable"))] -fn shm_payload_single_buf() { - use zenoh::prelude::*; - - // create an SHM backend... 
- let backend = PosixSharedMemoryProviderBackend::builder() - .with_size(4096) - .unwrap() - .res() - .unwrap(); - // ...and an SHM provider - let provider = SharedMemoryProviderBuilder::builder() - .protocol_id::() - .backend(backend) - .res(); - - // Prepare a layout for allocations - let layout = provider.alloc_layout().size(1024).res().unwrap(); - - // allocate an SHM buffer - let mut owned_shm_buf_mut = layout.alloc().res().unwrap(); - - // get data - let _data: &[u8] = &owned_shm_buf_mut; - let _data_mut: &mut [u8] = &mut owned_shm_buf_mut; - - // convert into immutable owned buffer - let owned_shm_buf: ZSliceShm = owned_shm_buf_mut.into(); - - // get data - let _data: &[u8] = &owned_shm_buf; - - // convert again into mutable owned buffer - let mut owned_shm_buf_mut: ZSliceShmMut = owned_shm_buf.try_into().unwrap(); - - // get data - let _data: &[u8] = &owned_shm_buf_mut; - let _data_mut: &mut [u8] = &mut owned_shm_buf_mut; - - // build a ZBytes from an SHM buffer - let mut payload: ZBytes = owned_shm_buf_mut.into(); - - { - // deserialize ZBytes as borrowed zsliceshm - let borrowed_shm_buf: &zsliceshm = payload.deserialize().unwrap(); - - // get data - let _data: &[u8] = borrowed_shm_buf; - - // construct owned buffer from borrowed type - let owned = borrowed_shm_buf.to_owned(); - - // get data - let _data: &[u8] = &owned; - } - - { - // deserialize ZBytes as mutably borrowed zsliceshm - let borrowed_shm_buf: &mut zsliceshm = payload.deserialize_mut().unwrap(); - - // get data - let _data: &[u8] = borrowed_shm_buf; - - // convert zsliceshm to zsliceshmmut - let borrowed_shm_buf_mut: &mut zsliceshmmut = borrowed_shm_buf.try_into().unwrap(); - - // get data - let _data: &[u8] = borrowed_shm_buf_mut; - let _data_mut: &mut [u8] = borrowed_shm_buf_mut; - } -} From f5195c04c403058c8aa1537b6a539037d476e649 Mon Sep 17 00:00:00 2001 From: Gabriele Baldoni Date: Fri, 3 May 2024 14:12:09 +0000 Subject: [PATCH 331/357] chore: renamed no_mangle feature to dynamic_plugin (#1010) * chore: renamed no_mangle feature to dynamic_plugin Signed-off-by: gabrik * fix: wrong find and replace fixed Signed-off-by: gabrik --------- Signed-off-by: gabrik --- plugins/zenoh-backend-example/Cargo.toml | 4 ++-- plugins/zenoh-backend-example/src/lib.rs | 2 +- plugins/zenoh-plugin-example/Cargo.toml | 4 ++-- plugins/zenoh-plugin-example/src/lib.rs | 2 +- plugins/zenoh-plugin-rest/Cargo.toml | 4 ++-- plugins/zenoh-plugin-rest/src/lib.rs | 2 +- plugins/zenoh-plugin-storage-manager/Cargo.toml | 4 ++-- plugins/zenoh-plugin-storage-manager/src/lib.rs | 2 +- 8 files changed, 12 insertions(+), 12 deletions(-) diff --git a/plugins/zenoh-backend-example/Cargo.toml b/plugins/zenoh-backend-example/Cargo.toml index eab4e8edb3..5ca4d3096b 100644 --- a/plugins/zenoh-backend-example/Cargo.toml +++ b/plugins/zenoh-backend-example/Cargo.toml @@ -20,8 +20,8 @@ edition = { workspace = true } publish = false [features] -default = ["no_mangle", "zenoh/default"] -no_mangle = [] +default = ["dynamic_plugin", "zenoh/default"] +dynamic_plugin = [] [lib] name = "zenoh_backend_example" diff --git a/plugins/zenoh-backend-example/src/lib.rs b/plugins/zenoh-backend-example/src/lib.rs index 602d29f375..aef889c64e 100644 --- a/plugins/zenoh-backend-example/src/lib.rs +++ b/plugins/zenoh-backend-example/src/lib.rs @@ -26,7 +26,7 @@ use zenoh_backend_traits::{ use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin}; use zenoh_result::ZResult; -#[cfg(feature = "no_mangle")] +#[cfg(feature = "dynamic_plugin")] 
zenoh_plugin_trait::declare_plugin!(ExampleBackend); impl Plugin for ExampleBackend { diff --git a/plugins/zenoh-plugin-example/Cargo.toml b/plugins/zenoh-plugin-example/Cargo.toml index ce12dbf18e..7cb0ad66f6 100644 --- a/plugins/zenoh-plugin-example/Cargo.toml +++ b/plugins/zenoh-plugin-example/Cargo.toml @@ -20,8 +20,8 @@ edition = { workspace = true } publish = false [features] -default = ["no_mangle", "zenoh/default", "zenoh/unstable", "zenoh/plugins"] -no_mangle = [] +default = ["dynamic_plugin", "zenoh/default", "zenoh/unstable", "zenoh/plugins"] +dynamic_plugin = [] [lib] # When auto-detecting the "example" plugin, `zenohd` will look for a dynamic library named "zenoh_plugin_example" diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 2b3450f804..304a3c6338 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -32,7 +32,7 @@ use zenoh_result::{bail, ZResult}; pub struct ExamplePlugin {} // declaration of the plugin's VTable for zenohd to find the plugin's functions to be called -#[cfg(feature = "no_mangle")] +#[cfg(feature = "dynamic_plugin")] zenoh_plugin_trait::declare_plugin!(ExamplePlugin); // A default selector for this example of storage plugin (in case the config doesn't set it) diff --git a/plugins/zenoh-plugin-rest/Cargo.toml b/plugins/zenoh-plugin-rest/Cargo.toml index 8d88368643..989bd1b86d 100644 --- a/plugins/zenoh-plugin-rest/Cargo.toml +++ b/plugins/zenoh-plugin-rest/Cargo.toml @@ -24,8 +24,8 @@ categories = ["network-programming", "web-programming::http-server"] description = "The zenoh REST plugin" [features] -default = ["no_mangle", "zenoh/default", "zenoh/unstable", "zenoh/plugins"] -no_mangle = [] +default = ["dynamic_plugin", "zenoh/default", "zenoh/unstable", "zenoh/plugins"] +dynamic_plugin = [] [lib] name = "zenoh_plugin_rest" diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 459df5c998..aff09f7198 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -189,7 +189,7 @@ fn response(status: StatusCode, content_type: impl TryInto, body: &str) -> builder.build() } -#[cfg(feature = "no_mangle")] +#[cfg(feature = "dynamic_plugin")] zenoh_plugin_trait::declare_plugin!(RestPlugin); pub struct RestPlugin {} diff --git a/plugins/zenoh-plugin-storage-manager/Cargo.toml b/plugins/zenoh-plugin-storage-manager/Cargo.toml index 1e37093a78..058722965f 100644 --- a/plugins/zenoh-plugin-storage-manager/Cargo.toml +++ b/plugins/zenoh-plugin-storage-manager/Cargo.toml @@ -24,8 +24,8 @@ categories = { workspace = true } description = "The zenoh storages plugin." 
[features] -default = ["no_mangle", "zenoh/default", "zenoh/unstable", "zenoh/plugins"] -no_mangle = [] +default = ["dynamic_plugin", "zenoh/default", "zenoh/unstable", "zenoh/plugins"] +dynamic_plugin = [] [lib] name = "zenoh_plugin_storage_manager" diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 2c281144ce..e0529eff7e 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -52,7 +52,7 @@ mod memory_backend; mod replica; mod storages_mgt; -#[cfg(feature = "no_mangle")] +#[cfg(feature = "dynamic_plugin")] zenoh_plugin_trait::declare_plugin!(StoragesPlugin); pub struct StoragesPlugin {} From e53364f30717912fbef99cb16e26e59733097b58 Mon Sep 17 00:00:00 2001 From: kydos Date: Sat, 4 May 2024 09:27:04 +0200 Subject: [PATCH 332/357] Fixed performance issue with contiguous (#1008) (#1009) * Fixed performance issue with contiguous (#1008) * fixed format issue slipped in when fixing (#1008) * Update commons/zenoh-buffers/src/lib.rs Co-authored-by: Luca Cominardi --------- Co-authored-by: Luca Cominardi --- commons/zenoh-buffers/src/lib.rs | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/commons/zenoh-buffers/src/lib.rs b/commons/zenoh-buffers/src/lib.rs index 4dee599ea7..abd85d024c 100644 --- a/commons/zenoh-buffers/src/lib.rs +++ b/commons/zenoh-buffers/src/lib.rs @@ -101,11 +101,22 @@ pub mod buffer { let mut slices = self.slices(); match slices.len() { 0 => Cow::Borrowed(b""), - 1 => Cow::Borrowed(slices.next().unwrap()), - _ => Cow::Owned(slices.fold(Vec::new(), |mut acc, it| { - acc.extend(it); - acc - })), + 1 => { + // SAFETY: unwrap here is safe because we have explicitly checked + // the iterator has 1 element. + Cow::Borrowed(unsafe { slices.next().unwrap_unchecked() }) + } + _ => { + let mut l = 0; + for s in slices.by_ref() { + l += s.len(); + } + let mut vec = Vec::with_capacity(l); + for slice in slices { + vec.extend_from_slice(slice); + } + Cow::Owned(vec) + } } } } From 1e1027cd80d301c8a0615ca57b845506507bea1e Mon Sep 17 00:00:00 2001 From: Diogo Matsubara Date: Mon, 6 May 2024 10:13:42 +0200 Subject: [PATCH 333/357] Fix release workflow to use docker image attribute (#1011) In https://github.com/eclipse-zenoh/ci/pull/129/files, the action to release docker images was changed to use an image attribute instead of tags, so we can properly tag latest and nightly releases. 
--- .github/workflows/release.yml | 24 +----------------------- 1 file changed, 1 insertion(+), 23 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index f5ff3fd2f9..e4e711ee27 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -164,29 +164,7 @@ jobs: version: ${{ needs.tag.outputs.version }} repo: ${{ github.repository }} branch: ${{ needs.tag.outputs.branch }} - tags: "eclipse/zenoh:${{ needs.tag.outputs.version }}" - binary: zenohd - files: | - zenohd - libzenoh_plugin_rest.so - libzenoh_plugin_storage_manager.so - platforms: | - linux/arm64 - linux/amd64 - licenses: EPL-2.0 OR Apache-2.0 - secrets: inherit - - ghcr: - name: Publish container image to GitHub Container Registry - needs: [tag, build-standalone] - uses: eclipse-zenoh/ci/.github/workflows/release-crates-ghcr.yml@main - with: - no-build: true - live-run: true - version: ${{ needs.tag.outputs.version }} - repo: ${{ github.repository }} - branch: ${{ needs.tag.outputs.branch }} - tags: "ghcr.io/${{ github.repository }}:${{ needs.tag.outputs.version }}" + image: "eclipse/zenoh" binary: zenohd files: | zenohd From 6338c634d9ac7e0649cffc7e2eec5a8329c13419 Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Mon, 6 May 2024 16:19:40 +0300 Subject: [PATCH 334/357] Refine SHM alloc API --- .../api/provider/shared_memory_provider.rs | 194 ++++++++++++------ commons/zenoh-shm/src/api/provider/types.rs | 26 +++ examples/examples/z_alloc_shm.rs | 53 ++++- examples/examples/z_bytes_shm.rs | 9 +- examples/examples/z_get_shm.rs | 6 +- examples/examples/z_ping_shm.rs | 9 +- examples/examples/z_pub_shm.rs | 2 +- examples/examples/z_pub_shm_thr.rs | 9 +- examples/examples/z_queryable_shm.rs | 6 +- io/zenoh-transport/tests/unicast_shm.rs | 2 +- zenoh/src/api/bytes.rs | 2 +- zenoh/src/lib.rs | 14 +- zenoh/tests/bytes.rs | 2 +- zenoh/tests/shm.rs | 2 +- 14 files changed, 212 insertions(+), 124 deletions(-) diff --git a/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs b/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs index 1ca560f07e..658c96e162 100644 --- a/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs +++ b/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs @@ -25,7 +25,10 @@ use zenoh_result::ZResult; use super::{ chunk::{AllocatedChunk, ChunkDescriptor}, shared_memory_provider_backend::SharedMemoryProviderBackend, - types::{AllocAlignment, BufAllocResult, ChunkAllocResult, MemoryLayout, ZAllocError}, + types::{ + AllocAlignment, BufAllocResult, BufLayoutAllocResult, ChunkAllocResult, MemoryLayout, + ZAllocError, ZLayoutAllocError, ZLayoutError, + }, }; use crate::{ api::{buffer::zshmmut::ZShmMut, common::types::ProtocolID}, @@ -64,83 +67,69 @@ impl BusyChunk { } } -/// Builder to create AllocLayout -#[zenoh_macros::unstable_doc] -pub struct AllocLayoutBuilder<'a, IDSource, Backend> +struct AllocData<'a, IDSource, Backend> where IDSource: ProtocolIDSource, Backend: SharedMemoryProviderBackend, { + size: usize, + alignment: AllocAlignment, provider: &'a SharedMemoryProvider, } -impl<'a, IDSource, Backend> AllocLayoutBuilder<'a, IDSource, Backend> -where - IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, -{ - /// Set size for layout - #[zenoh_macros::unstable_doc] - pub fn size(self, size: usize) -> AllocLayoutSizedBuilder<'a, IDSource, Backend> { - AllocLayoutSizedBuilder { - provider: self.provider, - size, - } - } -} #[zenoh_macros::unstable_doc] -pub struct AllocLayoutSizedBuilder<'a, IDSource, Backend> +pub 
struct AllocLayoutSizedBuilder<'a, IDSource, Backend>(AllocData<'a, IDSource, Backend>) where IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, -{ - provider: &'a SharedMemoryProvider, - size: usize, -} + Backend: SharedMemoryProviderBackend; + impl<'a, IDSource, Backend> AllocLayoutSizedBuilder<'a, IDSource, Backend> where IDSource: ProtocolIDSource, Backend: SharedMemoryProviderBackend, { - /// Set alignment for layout + fn new(provider: &'a SharedMemoryProvider, size: usize) -> Self { + Self(AllocData { + provider, + size, + alignment: AllocAlignment::default(), + }) + } + + /// Set alignment #[zenoh_macros::unstable_doc] - pub fn alignment( - self, - alignment: AllocAlignment, - ) -> AllocLayoutAlignedBuilder<'a, IDSource, Backend> { - AllocLayoutAlignedBuilder { - provider: self.provider, - size: self.size, + pub fn with_alignment(self, alignment: AllocAlignment) -> Self { + Self(AllocData { + provider: self.0.provider, + size: self.0.size, alignment, - } + }) } - /// try to build an allocation layout + /// Try to build an allocation layout #[zenoh_macros::unstable_doc] - pub fn res(self) -> ZResult> { - AllocLayout::new(self.size, AllocAlignment::default(), self.provider) + pub fn make_layout(self) -> Result, ZLayoutError> { + AllocLayout::new(self.0) } -} -#[zenoh_macros::unstable_doc] -pub struct AllocLayoutAlignedBuilder<'a, IDSource, Backend> -where - IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, -{ - provider: &'a SharedMemoryProvider, - size: usize, - alignment: AllocAlignment, -} -impl<'a, IDSource, Backend> AllocLayoutAlignedBuilder<'a, IDSource, Backend> -where - IDSource: ProtocolIDSource, - Backend: SharedMemoryProviderBackend, -{ - /// Try to build layout with specified args + /// Set the allocation policy #[zenoh_macros::unstable_doc] - pub fn res(self) -> ZResult> { - AllocLayout::new(self.size, self.alignment, self.provider) + pub fn with_policy(self) -> AllocBuilder2<'a, IDSource, Backend, Policy> { + AllocBuilder2 { + data: self.0, + _phantom: PhantomData, + } + } + + /// Get the result + #[zenoh_macros::unstable_doc] + pub fn res(self) -> BufLayoutAllocResult { + let builder = AllocBuilder2::<'a, IDSource, Backend, JustAlloc> { + data: self.0, + _phantom: PhantomData, + }; + + builder.res() } } @@ -173,24 +162,25 @@ where } } - fn new( - size: usize, - alignment: AllocAlignment, - provider: &'a SharedMemoryProvider, - ) -> ZResult { + fn new(data: AllocData<'a, IDSource, Backend>) -> Result { // NOTE: Depending on internal implementation, provider's backend might relayout // the allocations for bigger alignment (ex. 
4-byte aligned allocation to 8-bytes aligned) // Create layout for specified arguments - let layout = MemoryLayout::new(size, alignment)?; + let layout = MemoryLayout::new(data.size, data.alignment) + .map_err(|_| ZLayoutError::IncorrectLayoutArgs)?; // Obtain provider's layout for our layout - let provider_layout = provider.backend.layout_for(layout)?; + let provider_layout = data + .provider + .backend + .layout_for(layout) + .map_err(|_| ZLayoutError::ProviderIncompatibleLayout)?; Ok(Self { - size, + size: data.size, provider_layout, - provider, + provider: data.provider, }) } } @@ -511,6 +501,75 @@ unsafe impl<'a, Policy: AllocPolicy, IDSource, Backend: SharedMemoryProviderBack } }*/ +/// Builder for allocations +#[zenoh_macros::unstable_doc] +pub struct AllocBuilder2< + 'a, + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, + Policy = JustAlloc, +> { + data: AllocData<'a, IDSource, Backend>, + _phantom: PhantomData, +} + +// Generic impl +impl<'a, IDSource, Backend, Policy> AllocBuilder2<'a, IDSource, Backend, Policy> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + /// Set the allocation policy + #[zenoh_macros::unstable_doc] + pub fn with_policy(self) -> AllocBuilder2<'a, IDSource, Backend, OtherPolicy> { + AllocBuilder2 { + data: self.data, + _phantom: PhantomData, + } + } +} + +// Alloc policy +impl<'a, IDSource, Backend, Policy> AllocBuilder2<'a, IDSource, Backend, Policy> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, + Policy: AllocPolicy, +{ + /// Get the result + #[zenoh_macros::unstable_doc] + pub fn res(self) -> BufLayoutAllocResult { + let layout = AllocLayout::new(self.data).map_err(ZLayoutAllocError::Layout)?; + + layout + .alloc() + .with_policy::() + .res() + .map_err(ZLayoutAllocError::Alloc) + } +} + +// Async Alloc policy +impl<'a, IDSource, Backend, Policy> AllocBuilder2<'a, IDSource, Backend, Policy> +where + IDSource: ProtocolIDSource + Send + Sync, + Backend: SharedMemoryProviderBackend + Sync, + Policy: AsyncAllocPolicy, +{ + /// Get the async result + #[zenoh_macros::unstable_doc] + pub async fn res_async(self) -> BufLayoutAllocResult { + let layout = AllocLayout::new(self.data).map_err(ZLayoutAllocError::Layout)?; + + layout + .alloc() + .with_policy::() + .res_async() + .await + .map_err(ZLayoutAllocError::Alloc) + } +} + /// Builder for allocations #[zenoh_macros::unstable_doc] pub struct AllocBuilder< @@ -699,11 +758,10 @@ where IDSource: ProtocolIDSource, Backend: SharedMemoryProviderBackend, { - /// Create layout builder associated with particular SharedMemoryProvider. 
- /// Layout is a rich interface to make allocations + /// Rich interface for making allocations #[zenoh_macros::unstable_doc] - pub fn alloc_layout(&self) -> AllocLayoutBuilder { - AllocLayoutBuilder { provider: self } + pub fn alloc(&self, size: usize) -> AllocLayoutSizedBuilder { + AllocLayoutSizedBuilder::new(self, size) } /// Defragment memory diff --git a/commons/zenoh-shm/src/api/provider/types.rs b/commons/zenoh-shm/src/api/provider/types.rs index beae24bfb7..6e8ced7fc8 100644 --- a/commons/zenoh-shm/src/api/provider/types.rs +++ b/commons/zenoh-shm/src/api/provider/types.rs @@ -163,6 +163,17 @@ impl MemoryLayout { } } +/// Layouting errors +/// +/// IncorrectLayoutArgs: layout arguments are incorrect +/// ProviderIncompatibleLayout: layout incompatible with provider +#[zenoh_macros::unstable_doc] +#[derive(Debug)] +pub enum ZLayoutError { + IncorrectLayoutArgs, + ProviderIncompatibleLayout, +} + /// SHM chunk allocation result #[zenoh_macros::unstable_doc] pub type ChunkAllocResult = Result; @@ -170,3 +181,18 @@ pub type ChunkAllocResult = Result; /// SHM buffer allocation result #[zenoh_macros::unstable_doc] pub type BufAllocResult = Result; + +/// Layouting and allocation errors +/// +/// Alloc: allocation error +/// Layout: layouting error +#[zenoh_macros::unstable_doc] +#[derive(Debug)] +pub enum ZLayoutAllocError { + Alloc(ZAllocError), + Layout(ZLayoutError), +} + +/// SHM buffer layouting and allocation result +#[zenoh_macros::unstable_doc] +pub type BufLayoutAllocResult = Result; diff --git a/examples/examples/z_alloc_shm.rs b/examples/examples/z_alloc_shm.rs index 93df5d821d..d7d19de7f8 100644 --- a/examples/examples/z_alloc_shm.rs +++ b/examples/examples/z_alloc_shm.rs @@ -34,20 +34,51 @@ async fn run() -> ZResult<()> { .backend(backend) .res(); + // There are two API-defined ways of making shm buffer allocations: direct and through the layout... + + // Direct allocation + // The direct allocation calcualtes all layouting checks on each allocation. It is good for making + // uniquely-layouted allocations. For making series of similar allocations, please refer to layout + // allocation API which is shown later in this example... + let _direct_allocation = { + // OPTION: Simple allocation + let simple = provider.alloc(512).res().unwrap(); + + // OPTION: Allocation with custom alignemnt and alloc policy customization + let _comprehensive = provider + .alloc(512) + .with_alignment(AllocAlignment::new(2)) + // for more examples on policies, please see allocation policy usage below (for layout allocation API) + .with_policy::() + .res() + .unwrap(); + + // OPTION: Allocation with custom alignemnt and async alloc policy + let _async = provider + .alloc(512) + .with_alignment(AllocAlignment::new(2)) + // for more examples on policies, please see allocation policy usage below (for layout allocation API) + .with_policy::>>() + .res_async() + .await + .unwrap(); + + simple + }; + // Create a layout for particular allocation arguments and particular SHM provider // The layout is validated for argument correctness and also is checked // against particular SHM provider's layouting capabilities. 
// This layout is reusable and can handle series of similar allocations let buffer_layout = { - // OPTION 1: Simple (default) configuration: - let simple_layout = provider.alloc_layout().size(512).res().unwrap(); + // OPTION: Simple configuration: + let simple_layout = provider.alloc(512).make_layout().unwrap(); - // OPTION 2: Comprehensive configuration: + // OPTION: Comprehensive configuration: let _comprehensive_layout = provider - .alloc_layout() - .size(512) - .alignment(AllocAlignment::new(2)) - .res() + .alloc(512) + .with_alignment(AllocAlignment::new(2)) + .make_layout() .unwrap(); simple_layout @@ -69,10 +100,10 @@ async fn run() -> ZResult<()> { let mut sbuf = async { // Some examples on how to use layout's interface: - // The default allocation with default JustAlloc policy + // OPTION: The default allocation with default JustAlloc policy let default_alloc = buffer_layout.alloc().res().unwrap(); - // The async allocation + // OPTION: The async allocation let _async_alloc = buffer_layout .alloc() .with_policy::() @@ -80,14 +111,14 @@ async fn run() -> ZResult<()> { .await .unwrap(); - // The comprehensive allocation policy that blocks if provider is not able to allocate + // OPTION: The comprehensive allocation policy that blocks if provider is not able to allocate let _comprehensive_alloc = buffer_layout .alloc() .with_policy::>>() .res() .unwrap(); - // The comprehensive allocation policy that deallocates up to 1000 buffers if provider is not able to allocate + // OPTION: The comprehensive allocation policy that deallocates up to 1000 buffers if provider is not able to allocate let _comprehensive_alloc = buffer_layout .alloc() .with_policy::>>() diff --git a/examples/examples/z_bytes_shm.rs b/examples/examples/z_bytes_shm.rs index 5c582e56e6..970ff2bae4 100644 --- a/examples/examples/z_bytes_shm.rs +++ b/examples/examples/z_bytes_shm.rs @@ -36,14 +36,7 @@ fn main() { // Allocate an SHM buffer // NOTE: For allocation API please check z_alloc_shm.rs example // NOTE: For buf's API please check z_bytes_shm.rs example - let mut owned_shm_buf_mut = provider - .alloc_layout() - .size(1024) - .res() - .unwrap() - .alloc() - .res() - .unwrap(); + let mut owned_shm_buf_mut = provider.alloc(1024).res().unwrap(); // mutable and immutable API let _data: &[u8] = &owned_shm_buf_mut; diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs index 39caf3a101..19e66b09f8 100644 --- a/examples/examples/z_get_shm.rs +++ b/examples/examples/z_get_shm.rs @@ -53,11 +53,7 @@ async fn main() { // NOTE: For buf's API please check z_bytes_shm.rs example println!("Allocating Shared Memory Buffer..."); let mut sbuf = provider - .alloc_layout() - .size(1024) - .res() - .unwrap() - .alloc() + .alloc(1024) .with_policy::>() .res_async() .await diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs index 4c3ad4ed40..c53669fc44 100644 --- a/examples/examples/z_ping_shm.rs +++ b/examples/examples/z_ping_shm.rs @@ -61,14 +61,7 @@ fn main() { // Allocate an SHM buffer // NOTE: For allocation API please check z_alloc_shm.rs example // NOTE: For buf's API please check z_bytes_shm.rs example - let buf = provider - .alloc_layout() - .size(size) - .res() - .unwrap() - .alloc() - .res() - .unwrap(); + let buf = provider.alloc(size).res().unwrap(); // convert ZShmMut into ZSlice as ZShmMut does not support Clone let buf: ZSlice = buf.into(); diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index d2a87a59cc..f07341a088 100644 --- 
a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -50,7 +50,7 @@ async fn main() -> Result<(), ZError> { // Create allocation layout for series of similar allocations println!("Allocating Shared Memory Buffer..."); - let layout = provider.alloc_layout().size(1024).res().unwrap(); + let layout = provider.alloc(1024).make_layout().unwrap(); println!("Press CTRL-C to quit..."); for idx in 0..u32::MAX { diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index 0d44fbe6ee..47b54b0589 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -44,14 +44,7 @@ async fn main() { // Allocate an SHM buffer // NOTE: For allocation API please check z_alloc_shm.rs example // NOTE: For buf's API please check z_bytes_shm.rs example - let mut buf = provider - .alloc_layout() - .size(size) - .res() - .unwrap() - .alloc() - .res() - .unwrap(); + let mut buf = provider.alloc(size).res().unwrap(); for b in buf.as_mut() { *b = rand::random::(); diff --git a/examples/examples/z_queryable_shm.rs b/examples/examples/z_queryable_shm.rs index ed2320d2c5..685b162a5a 100644 --- a/examples/examples/z_queryable_shm.rs +++ b/examples/examples/z_queryable_shm.rs @@ -73,11 +73,7 @@ async fn main() { // NOTE: For buf's API please check z_bytes_shm.rs example println!("Allocating Shared Memory Buffer..."); let mut sbuf = provider - .alloc_layout() - .size(1024) - .res() - .unwrap() - .alloc() + .alloc(1024) .with_policy::>() .res_async() .await diff --git a/io/zenoh-transport/tests/unicast_shm.rs b/io/zenoh-transport/tests/unicast_shm.rs index f7b884f6b9..981b856235 100644 --- a/io/zenoh-transport/tests/unicast_shm.rs +++ b/io/zenoh-transport/tests/unicast_shm.rs @@ -241,7 +241,7 @@ mod tests { ztimeout!(peer_shm01_manager.get_transport_unicast(&peer_net01)).unwrap(); assert!(!peer_net01_transport.is_shm().unwrap()); - let layout = shm01.alloc_layout().size(MSG_SIZE).res().unwrap(); + let layout = shm01.alloc(MSG_SIZE).make_layout().unwrap(); // Send the message println!("Transport SHM [3a]"); diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index 98afd1a3c3..3857019215 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -1961,7 +1961,7 @@ mod tests { .res(); // Prepare a layout for allocations - let layout = provider.alloc_layout().size(1024).res().unwrap(); + let layout = provider.alloc(1024).make_layout().unwrap(); // allocate an SHM buffer let mutable_shm_buf = layout.alloc().res().unwrap(); diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index caf961984b..e48388f5e7 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -398,15 +398,17 @@ pub mod shm { }, provider::{ shared_memory_provider::{ - AllocBuilder, AllocLayout, AllocLayoutAlignedBuilder, AllocLayoutBuilder, - AllocLayoutSizedBuilder, AllocPolicy, AsyncAllocPolicy, BlockOn, DeallocEldest, - DeallocOptimal, DeallocYoungest, Deallocate, Defragment, DynamicProtocolID, - ForceDeallocPolicy, GarbageCollect, JustAlloc, ProtocolIDSource, - SharedMemoryProvider, SharedMemoryProviderBuilder, + AllocBuilder, AllocBuilder2, AllocLayout, AllocLayoutSizedBuilder, AllocPolicy, + AsyncAllocPolicy, BlockOn, DeallocEldest, DeallocOptimal, DeallocYoungest, + Deallocate, Defragment, DynamicProtocolID, ForceDeallocPolicy, GarbageCollect, + JustAlloc, ProtocolIDSource, SharedMemoryProvider, SharedMemoryProviderBuilder, SharedMemoryProviderBuilderBackendID, SharedMemoryProviderBuilderID, StaticProtocolID, }, - types::{AllocAlignment, BufAllocResult, 
ChunkAllocResult, MemoryLayout, ZAllocError}, + types::{ + AllocAlignment, BufAllocResult, BufLayoutAllocResult, ChunkAllocResult, + MemoryLayout, ZAllocError, ZLayoutAllocError, ZLayoutError, + }, }, }; } diff --git a/zenoh/tests/bytes.rs b/zenoh/tests/bytes.rs index 6de12ab63f..0f26625fba 100644 --- a/zenoh/tests/bytes.rs +++ b/zenoh/tests/bytes.rs @@ -30,7 +30,7 @@ fn shm_bytes_single_buf() { .res(); // Prepare a layout for allocations - let layout = provider.alloc_layout().size(1024).res().unwrap(); + let layout = provider.alloc(1024).make_layout().unwrap(); // allocate an SHM buffer (ZShmMut) let owned_shm_buf_mut = layout.alloc().res().unwrap(); diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs index 14f6985414..9c71126138 100644 --- a/zenoh/tests/shm.rs +++ b/zenoh/tests/shm.rs @@ -122,7 +122,7 @@ mod tests { let shm_segment_size = shm01.available(); // Prepare a layout for allocations - let layout = shm01.alloc_layout().size(size).res().unwrap(); + let layout = shm01.alloc(size).make_layout().unwrap(); // Put data println!("[PS][03b] Putting on peer02 session. {MSG_COUNT} msgs of {size} bytes."); From 7cf2eee99297cdaf89bdaf734fc1c633a17632b5 Mon Sep 17 00:00:00 2001 From: Yuyuan Yuan Date: Mon, 6 May 2024 22:31:40 +0800 Subject: [PATCH 335/357] fix: remove the unsound atexit cleanup (#1015) * fix: remove the unsound atexit cleanup * fix: refine the unsafe function --- .../src/pub_sub/bin/z_pub_sub.rs | 1 + .../src/queryable_get/bin/z_queryable_get.rs | 1 + commons/zenoh-runtime/src/lib.rs | 22 +++++++++---------- 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs b/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs index 60bda80bda..9561f7016a 100644 --- a/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs +++ b/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs @@ -17,6 +17,7 @@ use zenoh::prelude::r#async::*; #[tokio::main] async fn main() { + let _z = zenoh_runtime::ZRuntimePoolGuard; zenoh_util::init_log_test(); let pub_key_expr = KeyExpr::try_from("test/valgrind/data").unwrap(); diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs index 1c82339392..78fb705fe8 100644 --- a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -18,6 +18,7 @@ use zenoh::prelude::r#async::*; #[tokio::main] async fn main() { + let _z = zenoh_runtime::ZRuntimePoolGuard; zenoh_util::init_log_test(); let queryable_key_expr = KeyExpr::try_from("test/valgrind/data").unwrap(); diff --git a/commons/zenoh-runtime/src/lib.rs b/commons/zenoh-runtime/src/lib.rs index dcd46744e6..2023c63bb3 100644 --- a/commons/zenoh-runtime/src/lib.rs +++ b/commons/zenoh-runtime/src/lib.rs @@ -145,11 +145,17 @@ lazy_static! { .collect(); } -// To drop the data mannually since Rust does not drop static variables. 
-pub extern "C" fn cleanup() { - unsafe { - std::mem::drop((ZRUNTIME_POOL.deref() as *const ZRuntimePool).read()); - std::mem::drop((ZRUNTIME_INDEX.deref() as *const HashMap).read()); +// A runtime guard used to explicitly drop the static variables that Rust doesn't drop by default +pub struct ZRuntimePoolGuard; + +impl Drop for ZRuntimePoolGuard { + fn drop(&mut self) { + unsafe { + std::mem::drop((ZRUNTIME_POOL.deref() as *const ZRuntimePool).read()); + std::mem::drop( + (ZRUNTIME_INDEX.deref() as *const HashMap).read(), + ); + } } } @@ -157,12 +163,6 @@ pub struct ZRuntimePool(HashMap>); impl ZRuntimePool { fn new() -> Self { - // It has been recognized that using atexit within Windows DLL is problematic - #[cfg(not(target_os = "windows"))] - // Register a callback to clean the static variables. - unsafe { - libc::atexit(cleanup); - } Self(ZRuntime::iter().map(|zrt| (zrt, OnceLock::new())).collect()) } From 7e5d5e8f1822eb655f3a3a852286ead9cdaec5ab Mon Sep 17 00:00:00 2001 From: Yuyuan Yuan Date: Tue, 7 May 2024 00:38:24 +0800 Subject: [PATCH 336/357] fix: `zenoh_session_multicast` test (#1004) * test: use the minimal setting * test: use a shorter timeout * fix: re-introduce the nextest timeout * test: add the debugging info * test: disable the windows ci * Fix deadlock in session::close * test: do not retry * chore: clean up all the auxiliary files --------- Co-authored-by: OlivierHecart --- zenoh/src/session.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index eaca07d964..9ff91ea990 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -529,10 +529,11 @@ impl Session { self.runtime.close().await?; } let mut state = zwrite!(self.state); - state.primitives.as_ref().unwrap().send_close(); // clean up to break cyclic references from self.state to itself - state.primitives.take(); + let primitives = state.primitives.take(); state.queryables.clear(); + drop(state); + primitives.as_ref().unwrap().send_close(); self.alive = false; Ok(()) }) From b8dd01d6ce05f851f33497e8d899df308d315afb Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz Date: Tue, 7 May 2024 11:46:14 +0200 Subject: [PATCH 337/357] feat: Improve proc-macro `zenoh_macros::unstable` (#1016) --- commons/zenoh-macros/src/lib.rs | 122 ++++++++++++++++++++++++++++---- zenoh/Cargo.toml | 3 +- zenoh/src/lib.rs | 2 + zenoh/src/query.rs | 10 +-- 4 files changed, 114 insertions(+), 23 deletions(-) diff --git a/commons/zenoh-macros/src/lib.rs b/commons/zenoh-macros/src/lib.rs index 4a69f5c4e3..774bebc80a 100644 --- a/commons/zenoh-macros/src/lib.rs +++ b/commons/zenoh-macros/src/lib.rs @@ -18,8 +18,8 @@ //! //! [Click here for Zenoh's documentation](../zenoh/index.html) use proc_macro::TokenStream; -use quote::quote; -use syn::LitStr; +use quote::{quote, ToTokens}; +use syn::{parse_macro_input, parse_quote, Attribute, Error, Item, LitStr, TraitItem}; use zenoh_keyexpr::{ format::{ macro_support::{self, SegmentBuilder}, @@ -59,19 +59,113 @@ pub fn rustc_version_release(_tokens: TokenStream) -> TokenStream { (quote! {(#release, #commit)}).into() } +/// An enumeration of items supported by the [`unstable`] attribute. +enum UnstableItem { + /// Wrapper around [`syn::Item`]. + Item(Item), + /// Wrapper around [`syn::TraitItem`]. + TraitItem(TraitItem), +} + +macro_rules! 
parse_unstable_item { + ($tokens:ident) => {{ + let item: Item = parse_macro_input!($tokens as Item); + + if matches!(item, Item::Verbatim(_)) { + let tokens = TokenStream::from(item.to_token_stream()); + let trait_item: TraitItem = parse_macro_input!(tokens as TraitItem); + + if matches!(trait_item, TraitItem::Verbatim(_)) { + Err(Error::new_spanned( + trait_item, + "the `unstable` proc-macro attribute only supports items and trait items", + )) + } else { + Ok(UnstableItem::TraitItem(trait_item)) + } + } else { + Ok(UnstableItem::Item(item)) + } + }}; +} + +impl UnstableItem { + /// Mutably borrows the attribute list of this item. + fn attributes_mut(&mut self) -> Result<&mut Vec, Error> { + match self { + UnstableItem::Item(item) => match item { + Item::Const(item) => Ok(&mut item.attrs), + Item::Enum(item) => Ok(&mut item.attrs), + Item::ExternCrate(item) => Ok(&mut item.attrs), + Item::Fn(item) => Ok(&mut item.attrs), + Item::ForeignMod(item) => Ok(&mut item.attrs), + Item::Impl(item) => Ok(&mut item.attrs), + Item::Macro(item) => Ok(&mut item.attrs), + Item::Mod(item) => Ok(&mut item.attrs), + Item::Static(item) => Ok(&mut item.attrs), + Item::Struct(item) => Ok(&mut item.attrs), + Item::Trait(item) => Ok(&mut item.attrs), + Item::TraitAlias(item) => Ok(&mut item.attrs), + Item::Type(item) => Ok(&mut item.attrs), + Item::Union(item) => Ok(&mut item.attrs), + Item::Use(item) => Ok(&mut item.attrs), + other => Err(Error::new_spanned( + other, + "item is not supported by the `unstable` proc-macro attribute", + )), + }, + UnstableItem::TraitItem(trait_item) => match trait_item { + TraitItem::Const(trait_item) => Ok(&mut trait_item.attrs), + TraitItem::Fn(trait_item) => Ok(&mut trait_item.attrs), + TraitItem::Type(trait_item) => Ok(&mut trait_item.attrs), + TraitItem::Macro(trait_item) => Ok(&mut trait_item.attrs), + other => Err(Error::new_spanned( + other, + "item is not supported by the `unstable` proc-macro attribute", + )), + }, + } + } + + /// Converts this item to a `proc_macro2::TokenStream`. + fn to_token_stream(&self) -> proc_macro2::TokenStream { + match self { + UnstableItem::Item(item) => item.to_token_stream(), + UnstableItem::TraitItem(trait_item) => trait_item.to_token_stream(), + } + } +} + #[proc_macro_attribute] -pub fn unstable(_attr: TokenStream, item: TokenStream) -> TokenStream { - let item = proc_macro2::TokenStream::from(item); - TokenStream::from(quote! { - #[cfg(feature = "unstable")] - ///
- /// 🔬 - /// This API has been marked as unstable: it works as advertised, but we may change it in a future release. - /// To use it, you must enable zenoh's unstable feature flag. - ///
- /// - #item - }) +pub fn unstable(_attr: TokenStream, tokens: TokenStream) -> TokenStream { + let mut item = match parse_unstable_item!(tokens) { + Ok(item) => item, + Err(err) => return err.into_compile_error().into(), + }; + + let attrs = match item.attributes_mut() { + Ok(attrs) => attrs, + Err(err) => return err.into_compile_error().into(), + }; + + if attrs.iter().any(is_doc_attribute) { + // See: https://doc.rust-lang.org/rustdoc/how-to-write-documentation.html#adding-a-warning-block + let message = "
<div class=\"warning\">This API has been marked as unstable: it works as advertised, but it may be changed in a future release.</div>
"; + let note: Attribute = parse_quote!(#[doc = #message]); + attrs.push(note); + } + + let feature_gate: Attribute = parse_quote!(#[cfg(feature = "unstable")]); + attrs.push(feature_gate); + + TokenStream::from(item.to_token_stream()) +} + +/// Returns `true` if the attribute is a `#[doc = "..."]` attribute. +fn is_doc_attribute(attr: &Attribute) -> bool { + attr.path() + .get_ident() + .is_some_and(|ident| &ident.to_string() == "doc") } fn keformat_support(source: &str) -> proc_macro2::TokenStream { diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 7c9288731f..a8bf041c11 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -77,7 +77,7 @@ form_urlencoded = { workspace = true } futures = { workspace = true } git-version = { workspace = true } lazy_static = { workspace = true } -tracing = {workspace = true} +tracing = { workspace = true } ordered-float = { workspace = true } paste = { workspace = true } petgraph = { workspace = true } @@ -119,6 +119,7 @@ name = "zenoh" # NOTE: if you change this, also change it in .github/workflows/release.yml in "doc" job. [package.metadata.docs.rs] features = ["unstable"] +rustdoc-args = ["--cfg", "doc_auto_cfg"] [package.metadata.deb] name = "zenoh" diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 8a41464267..3693218291 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -12,6 +12,8 @@ // ZettaScale Zenoh Team, // +#![cfg_attr(doc_auto_cfg, feature(doc_auto_cfg))] + //! [Zenoh](https://zenoh.io) /zeno/ is a stack that unifies data in motion, data at //! rest and computations. It elegantly blends traditional pub/sub with geo distributed //! storage, queries and computations, while retaining a level of time and space efficiency diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index f75df8c50e..8ac43ee4ba 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -355,19 +355,13 @@ pub(crate) const _REPLY_KEY_EXPR_ANY_SEL_PARAM: &str = "_anyke"; pub const REPLY_KEY_EXPR_ANY_SEL_PARAM: &str = _REPLY_KEY_EXPR_ANY_SEL_PARAM; #[zenoh_macros::unstable] -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default)] pub enum ReplyKeyExpr { Any, + #[default] MatchingQuery, } -#[zenoh_macros::unstable] -impl Default for ReplyKeyExpr { - fn default() -> Self { - ReplyKeyExpr::MatchingQuery - } -} - impl Resolvable for GetBuilder<'_, '_, Handler> where Handler: IntoCallbackReceiverPair<'static, Reply> + Send, From c2bbe56cf23d2ea85a181a078d07b45c6b113d6a Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Tue, 7 May 2024 15:59:57 +0300 Subject: [PATCH 338/357] Implement Wait and IntoFuture for SHM allocators --- .../api/provider/shared_memory_provider.rs | 132 +++++++++++------- examples/examples/z_alloc_shm.rs | 16 +-- examples/examples/z_bytes_shm.rs | 10 +- examples/examples/z_get_shm.rs | 1 - examples/examples/z_ping_shm.rs | 2 +- examples/examples/z_pub_shm.rs | 3 +- examples/examples/z_pub_shm_thr.rs | 2 +- examples/examples/z_queryable_shm.rs | 1 - io/zenoh-transport/tests/unicast_shm.rs | 16 +-- zenoh/src/api/bytes.rs | 6 +- zenoh/tests/bytes.rs | 2 +- zenoh/tests/shm.rs | 9 +- 12 files changed, 109 insertions(+), 91 deletions(-) diff --git a/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs b/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs index 658c96e162..811ff9ec57 100644 --- a/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs +++ b/commons/zenoh-shm/src/api/provider/shared_memory_provider.rs @@ -14,12 +14,15 @@ use std::{ collections::VecDeque, + 
future::{Future, IntoFuture}, marker::PhantomData, + pin::Pin, sync::{atomic::Ordering, Arc, Mutex}, time::Duration, }; use async_trait::async_trait; +use zenoh_core::{Resolvable, Wait}; use zenoh_result::ZResult; use super::{ @@ -108,7 +111,7 @@ where /// Try to build an allocation layout #[zenoh_macros::unstable_doc] - pub fn make_layout(self) -> Result, ZLayoutError> { + pub fn into_layout(self) -> Result, ZLayoutError> { AllocLayout::new(self.0) } @@ -120,16 +123,29 @@ where _phantom: PhantomData, } } +} - /// Get the result - #[zenoh_macros::unstable_doc] - pub fn res(self) -> BufLayoutAllocResult { +#[zenoh_macros::unstable_doc] +impl<'a, IDSource, Backend> Resolvable for AllocLayoutSizedBuilder<'a, IDSource, Backend> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + type To = BufLayoutAllocResult; +} + +// Sync alloc policy +impl<'a, IDSource, Backend> Wait for AllocLayoutSizedBuilder<'a, IDSource, Backend> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + fn wait(self) -> ::To { let builder = AllocBuilder2::<'a, IDSource, Backend, JustAlloc> { data: self.0, _phantom: PhantomData, }; - - builder.res() + builder.wait() } } @@ -262,10 +278,7 @@ pub trait AllocPolicy { #[zenoh_macros::unstable_doc] #[async_trait] pub trait AsyncAllocPolicy { - async fn alloc_async< - IDSource: ProtocolIDSource + Send + Sync, - Backend: SharedMemoryProviderBackend + Sync, - >( + async fn alloc_async( layout: &MemoryLayout, provider: &SharedMemoryProvider, ) -> ChunkAllocResult; @@ -403,13 +416,14 @@ where { _phantom: PhantomData, } + #[async_trait] impl AsyncAllocPolicy for BlockOn where InnerPolicy: AllocPolicy, { async fn alloc_async< - IDSource: ProtocolIDSource + Send + Sync, + IDSource: ProtocolIDSource, Backend: SharedMemoryProviderBackend + Sync, >( layout: &MemoryLayout, @@ -529,44 +543,54 @@ where } } -// Alloc policy -impl<'a, IDSource, Backend, Policy> AllocBuilder2<'a, IDSource, Backend, Policy> +impl<'a, IDSource, Backend, Policy> Resolvable for AllocBuilder2<'a, IDSource, Backend, Policy> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + type To = BufLayoutAllocResult; +} + +// Sync alloc policy +impl<'a, IDSource, Backend, Policy> Wait for AllocBuilder2<'a, IDSource, Backend, Policy> where IDSource: ProtocolIDSource, Backend: SharedMemoryProviderBackend, Policy: AllocPolicy, { - /// Get the result - #[zenoh_macros::unstable_doc] - pub fn res(self) -> BufLayoutAllocResult { + fn wait(self) -> ::To { let layout = AllocLayout::new(self.data).map_err(ZLayoutAllocError::Layout)?; layout .alloc() .with_policy::() - .res() + .wait() .map_err(ZLayoutAllocError::Alloc) } } -// Async Alloc policy -impl<'a, IDSource, Backend, Policy> AllocBuilder2<'a, IDSource, Backend, Policy> +// Async alloc policy +impl<'a, IDSource, Backend, Policy> IntoFuture for AllocBuilder2<'a, IDSource, Backend, Policy> where - IDSource: ProtocolIDSource + Send + Sync, + IDSource: ProtocolIDSource, Backend: SharedMemoryProviderBackend + Sync, Policy: AsyncAllocPolicy, { - /// Get the async result - #[zenoh_macros::unstable_doc] - pub async fn res_async(self) -> BufLayoutAllocResult { - let layout = AllocLayout::new(self.data).map_err(ZLayoutAllocError::Layout)?; - - layout - .alloc() - .with_policy::() - .res_async() - .await - .map_err(ZLayoutAllocError::Alloc) + type Output = ::To; + type IntoFuture = Pin::To> + 'a>>; + + fn into_future(self) -> Self::IntoFuture { + Box::pin( + async move { + let layout = 
AllocLayout::new(self.data).map_err(ZLayoutAllocError::Layout)?; + layout + .alloc() + .with_policy::() + .await + .map_err(ZLayoutAllocError::Alloc) + } + .into_future(), + ) } } @@ -598,36 +622,48 @@ where } } -// Alloc policy -impl<'a, IDSource, Backend, Policy> AllocBuilder<'a, IDSource, Backend, Policy> +impl<'a, IDSource, Backend, Policy> Resolvable for AllocBuilder<'a, IDSource, Backend, Policy> +where + IDSource: ProtocolIDSource, + Backend: SharedMemoryProviderBackend, +{ + type To = BufAllocResult; +} + +// Sync alloc policy +impl<'a, IDSource, Backend, Policy> Wait for AllocBuilder<'a, IDSource, Backend, Policy> where IDSource: ProtocolIDSource, Backend: SharedMemoryProviderBackend, Policy: AllocPolicy, { - /// Get the result - #[zenoh_macros::unstable_doc] - pub fn res(self) -> BufAllocResult { + fn wait(self) -> ::To { self.layout .provider .alloc_inner::(self.layout.size, &self.layout.provider_layout) } } -// Async Alloc policy -impl<'a, IDSource, Backend, Policy> AllocBuilder<'a, IDSource, Backend, Policy> +// Async alloc policy +impl<'a, IDSource, Backend, Policy> IntoFuture for AllocBuilder<'a, IDSource, Backend, Policy> where - IDSource: ProtocolIDSource + Send + Sync, + IDSource: ProtocolIDSource, Backend: SharedMemoryProviderBackend + Sync, Policy: AsyncAllocPolicy, { - /// Get the async result - #[zenoh_macros::unstable_doc] - pub async fn res_async(self) -> BufAllocResult { - self.layout - .provider - .alloc_inner_async::(self.layout.size, &self.layout.provider_layout) - .await + type Output = ::To; + type IntoFuture = Pin::To> + 'a>>; + + fn into_future(self) -> Self::IntoFuture { + Box::pin( + async move { + self.layout + .provider + .alloc_inner_async::(self.layout.size, &self.layout.provider_layout) + .await + } + .into_future(), + ) } } @@ -703,7 +739,7 @@ where /// Trait to create ProtocolID sources for SharedMemoryProvider #[zenoh_macros::unstable_doc] -pub trait ProtocolIDSource { +pub trait ProtocolIDSource: Send + Sync { fn id(&self) -> ProtocolID; } @@ -938,7 +974,7 @@ where // PRIVATE impls for Sync backend impl SharedMemoryProvider where - IDSource: ProtocolIDSource + Send + Sync, + IDSource: ProtocolIDSource, Backend: SharedMemoryProviderBackend + Sync, { async fn alloc_inner_async( diff --git a/examples/examples/z_alloc_shm.rs b/examples/examples/z_alloc_shm.rs index d7d19de7f8..297626fc73 100644 --- a/examples/examples/z_alloc_shm.rs +++ b/examples/examples/z_alloc_shm.rs @@ -42,7 +42,7 @@ async fn run() -> ZResult<()> { // allocation API which is shown later in this example... 
let _direct_allocation = { // OPTION: Simple allocation - let simple = provider.alloc(512).res().unwrap(); + let simple = provider.alloc(512).wait().unwrap(); // OPTION: Allocation with custom alignemnt and alloc policy customization let _comprehensive = provider @@ -50,7 +50,7 @@ async fn run() -> ZResult<()> { .with_alignment(AllocAlignment::new(2)) // for more examples on policies, please see allocation policy usage below (for layout allocation API) .with_policy::() - .res() + .wait() .unwrap(); // OPTION: Allocation with custom alignemnt and async alloc policy @@ -59,7 +59,6 @@ async fn run() -> ZResult<()> { .with_alignment(AllocAlignment::new(2)) // for more examples on policies, please see allocation policy usage below (for layout allocation API) .with_policy::>>() - .res_async() .await .unwrap(); @@ -72,13 +71,13 @@ async fn run() -> ZResult<()> { // This layout is reusable and can handle series of similar allocations let buffer_layout = { // OPTION: Simple configuration: - let simple_layout = provider.alloc(512).make_layout().unwrap(); + let simple_layout = provider.alloc(512).into_layout().unwrap(); // OPTION: Comprehensive configuration: let _comprehensive_layout = provider .alloc(512) .with_alignment(AllocAlignment::new(2)) - .make_layout() + .into_layout() .unwrap(); simple_layout @@ -101,13 +100,12 @@ async fn run() -> ZResult<()> { // Some examples on how to use layout's interface: // OPTION: The default allocation with default JustAlloc policy - let default_alloc = buffer_layout.alloc().res().unwrap(); + let default_alloc = buffer_layout.alloc().wait().unwrap(); // OPTION: The async allocation let _async_alloc = buffer_layout .alloc() .with_policy::() - .res_async() .await .unwrap(); @@ -115,14 +113,14 @@ async fn run() -> ZResult<()> { let _comprehensive_alloc = buffer_layout .alloc() .with_policy::>>() - .res() + .wait() .unwrap(); // OPTION: The comprehensive allocation policy that deallocates up to 1000 buffers if provider is not able to allocate let _comprehensive_alloc = buffer_layout .alloc() .with_policy::>>() - .res() + .wait() .unwrap(); default_alloc diff --git a/examples/examples/z_bytes_shm.rs b/examples/examples/z_bytes_shm.rs index 970ff2bae4..60a50ba0d1 100644 --- a/examples/examples/z_bytes_shm.rs +++ b/examples/examples/z_bytes_shm.rs @@ -11,13 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use zenoh::{ - bytes::ZBytes, - shm::{ - zshm, zshmmut, PosixSharedMemoryProviderBackend, SharedMemoryProviderBuilder, ZShm, - ZShmMut, POSIX_PROTOCOL_ID, - }, -}; +use zenoh::prelude::*; fn main() { // create an SHM backend... 
@@ -36,7 +30,7 @@ fn main() { // Allocate an SHM buffer // NOTE: For allocation API please check z_alloc_shm.rs example // NOTE: For buf's API please check z_bytes_shm.rs example - let mut owned_shm_buf_mut = provider.alloc(1024).res().unwrap(); + let mut owned_shm_buf_mut = provider.alloc(1024).wait().unwrap(); // mutable and immutable API let _data: &[u8] = &owned_shm_buf_mut; diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs index 19e66b09f8..05d5f6ae7a 100644 --- a/examples/examples/z_get_shm.rs +++ b/examples/examples/z_get_shm.rs @@ -55,7 +55,6 @@ async fn main() { let mut sbuf = provider .alloc(1024) .with_policy::>() - .res_async() .await .unwrap(); diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs index c53669fc44..2e4d5f86f8 100644 --- a/examples/examples/z_ping_shm.rs +++ b/examples/examples/z_ping_shm.rs @@ -61,7 +61,7 @@ fn main() { // Allocate an SHM buffer // NOTE: For allocation API please check z_alloc_shm.rs example // NOTE: For buf's API please check z_bytes_shm.rs example - let buf = provider.alloc(size).res().unwrap(); + let buf = provider.alloc(size).wait().unwrap(); // convert ZShmMut into ZSlice as ZShmMut does not support Clone let buf: ZSlice = buf.into(); diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index f07341a088..d5a6c56a67 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -50,7 +50,7 @@ async fn main() -> Result<(), ZError> { // Create allocation layout for series of similar allocations println!("Allocating Shared Memory Buffer..."); - let layout = provider.alloc(1024).make_layout().unwrap(); + let layout = provider.alloc(1024).into_layout().unwrap(); println!("Press CTRL-C to quit..."); for idx in 0..u32::MAX { @@ -60,7 +60,6 @@ async fn main() -> Result<(), ZError> { let mut sbuf = layout .alloc() .with_policy::>() - .res_async() .await .unwrap(); diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index 47b54b0589..d3e6d50181 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -44,7 +44,7 @@ async fn main() { // Allocate an SHM buffer // NOTE: For allocation API please check z_alloc_shm.rs example // NOTE: For buf's API please check z_bytes_shm.rs example - let mut buf = provider.alloc(size).res().unwrap(); + let mut buf = provider.alloc(size).wait().unwrap(); for b in buf.as_mut() { *b = rand::random::(); diff --git a/examples/examples/z_queryable_shm.rs b/examples/examples/z_queryable_shm.rs index 685b162a5a..80bbafb076 100644 --- a/examples/examples/z_queryable_shm.rs +++ b/examples/examples/z_queryable_shm.rs @@ -75,7 +75,6 @@ async fn main() { let mut sbuf = provider .alloc(1024) .with_policy::>() - .res_async() .await .unwrap(); diff --git a/io/zenoh-transport/tests/unicast_shm.rs b/io/zenoh-transport/tests/unicast_shm.rs index 981b856235..1b2369e620 100644 --- a/io/zenoh-transport/tests/unicast_shm.rs +++ b/io/zenoh-transport/tests/unicast_shm.rs @@ -241,18 +241,15 @@ mod tests { ztimeout!(peer_shm01_manager.get_transport_unicast(&peer_net01)).unwrap(); assert!(!peer_net01_transport.is_shm().unwrap()); - let layout = shm01.alloc(MSG_SIZE).make_layout().unwrap(); + let layout = shm01.alloc(MSG_SIZE).into_layout().unwrap(); // Send the message println!("Transport SHM [3a]"); // The msg count for (msg_count, _) in (0..MSG_COUNT).enumerate() { // Create the message to send - let mut sbuf = ztimeout!(layout - .alloc() - .with_policy::>() - .res_async()) - 
.unwrap(); + let mut sbuf = + ztimeout!(layout.alloc().with_policy::>()).unwrap(); sbuf[0..8].copy_from_slice(&msg_count.to_le_bytes()); let message: NetworkMessage = Push { @@ -292,11 +289,8 @@ mod tests { // The msg count for (msg_count, _) in (0..MSG_COUNT).enumerate() { // Create the message to send - let mut sbuf = ztimeout!(layout - .alloc() - .with_policy::>() - .res_async()) - .unwrap(); + let mut sbuf = + ztimeout!(layout.alloc().with_policy::>()).unwrap(); sbuf[0..8].copy_from_slice(&msg_count.to_le_bytes()); let message: NetworkMessage = Push { diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs index 3857019215..4b6a8fc33b 100644 --- a/zenoh/src/api/bytes.rs +++ b/zenoh/src/api/bytes.rs @@ -1831,6 +1831,8 @@ mod tests { use rand::Rng; use zenoh_buffers::{ZBuf, ZSlice}; + #[cfg(all(feature = "shared-memory", feature = "unstable"))] + use zenoh_core::Wait; use zenoh_protocol::core::Properties; #[cfg(all(feature = "shared-memory", feature = "unstable"))] use zenoh_shm::api::{ @@ -1961,10 +1963,10 @@ mod tests { .res(); // Prepare a layout for allocations - let layout = provider.alloc(1024).make_layout().unwrap(); + let layout = provider.alloc(1024).into_layout().unwrap(); // allocate an SHM buffer - let mutable_shm_buf = layout.alloc().res().unwrap(); + let mutable_shm_buf = layout.alloc().wait().unwrap(); // convert to immutable SHM buffer let immutable_shm_buf: ZShm = mutable_shm_buf.into(); diff --git a/zenoh/tests/bytes.rs b/zenoh/tests/bytes.rs index 0f26625fba..504406f00c 100644 --- a/zenoh/tests/bytes.rs +++ b/zenoh/tests/bytes.rs @@ -30,7 +30,7 @@ fn shm_bytes_single_buf() { .res(); // Prepare a layout for allocations - let layout = provider.alloc(1024).make_layout().unwrap(); + let layout = provider.alloc(1024).into_layout().unwrap(); // allocate an SHM buffer (ZShmMut) let owned_shm_buf_mut = layout.alloc().res().unwrap(); diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs index 9c71126138..20fb04e813 100644 --- a/zenoh/tests/shm.rs +++ b/zenoh/tests/shm.rs @@ -122,17 +122,14 @@ mod tests { let shm_segment_size = shm01.available(); // Prepare a layout for allocations - let layout = shm01.alloc(size).make_layout().unwrap(); + let layout = shm01.alloc(size).into_layout().unwrap(); // Put data println!("[PS][03b] Putting on peer02 session. 
{MSG_COUNT} msgs of {size} bytes."); for c in 0..msg_count { // Allocate new message - let sbuf = ztimeout!(layout - .alloc() - .with_policy::>() - .res_async()) - .unwrap(); + let sbuf = + ztimeout!(layout.alloc().with_policy::>()).unwrap(); println!("{c} created"); // Publish this message From 416fb28f45efc88d01ef84fd25295a68f2d6e4c8 Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Tue, 7 May 2024 16:24:39 +0300 Subject: [PATCH 339/357] fix clippy --- zenoh/tests/bytes.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zenoh/tests/bytes.rs b/zenoh/tests/bytes.rs index 504406f00c..039c1b1986 100644 --- a/zenoh/tests/bytes.rs +++ b/zenoh/tests/bytes.rs @@ -33,7 +33,7 @@ fn shm_bytes_single_buf() { let layout = provider.alloc(1024).into_layout().unwrap(); // allocate an SHM buffer (ZShmMut) - let owned_shm_buf_mut = layout.alloc().res().unwrap(); + let owned_shm_buf_mut = layout.alloc().wait().unwrap(); // convert into immutable owned buffer (ZShmMut -> ZSlceShm) let owned_shm_buf: ZShm = owned_shm_buf_mut.into(); From 86cf27d5df472545dc351729cb22a6f74a5ded1f Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Wed, 8 May 2024 15:17:17 +0300 Subject: [PATCH 340/357] fix port collision in tests --- zenoh/tests/shm.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs index 20fb04e813..3a0447fc92 100644 --- a/zenoh/tests/shm.rs +++ b/zenoh/tests/shm.rs @@ -176,7 +176,7 @@ mod tests { // Initiate logging zenoh_util::try_init_log_from_env(); - let (peer01, peer02) = open_session_unicast(&["tcp/127.0.0.1:17447"]).await; + let (peer01, peer02) = open_session_unicast(&["tcp/127.0.0.1:19447"]).await; test_session_pubsub(&peer01, &peer02, Reliability::Reliable).await; close_session(peer01, peer02).await; }); @@ -190,7 +190,7 @@ mod tests { zenoh_util::try_init_log_from_env(); let (peer01, peer02) = - open_session_multicast("udp/224.0.0.1:17448", "udp/224.0.0.1:17448").await; + open_session_multicast("udp/224.0.0.1:19448", "udp/224.0.0.1:19448").await; test_session_pubsub(&peer01, &peer02, Reliability::BestEffort).await; close_session(peer01, peer02).await; }); From 45e05f0e9375b03b7765bcdd5df399ac9136ac04 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Mon, 13 May 2024 16:22:43 +0200 Subject: [PATCH 341/357] Fix bug in Query Timeout (#1021) --- zenoh/src/net/routing/dispatcher/face.rs | 5 +- zenoh/src/net/routing/dispatcher/pubsub.rs | 7 ++- zenoh/src/net/routing/dispatcher/queries.rs | 64 ++++++++++++++++----- zenoh/src/net/tests/tables.rs | 5 ++ 4 files changed, 63 insertions(+), 18 deletions(-) diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index 653849ee5a..a31eb9d8ab 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -248,6 +248,7 @@ impl Primitives for Face { &self.state, &msg.wire_expr, msg.ext_qos, + msg.ext_tstamp, msg.payload, msg.ext_nodeid.node_id, ); @@ -260,10 +261,10 @@ impl Primitives for Face { &self.tables, &self.state, &msg.wire_expr, - // parameters, msg.id, msg.ext_target, - // consolidation, + msg.ext_budget, + msg.ext_timeout, msg.payload, msg.ext_nodeid.node_id, ); diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index 46a00bd382..42f10517ea 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -429,6 +429,7 @@ pub fn full_reentrant_route_data( face: &FaceState, expr: &WireExpr, ext_qos: 
ext::QoSType, + ext_tstamp: Option, mut payload: PushBody, routing_context: NodeId, ) { @@ -478,7 +479,7 @@ pub fn full_reentrant_route_data( outface.primitives.send_push(Push { wire_expr: key_expr.into(), ext_qos, - ext_tstamp: None, + ext_tstamp, ext_nodeid: ext::NodeIdType { node_id: *context }, payload, }) @@ -513,7 +514,7 @@ pub fn full_reentrant_route_data( outface.primitives.send_push(Push { wire_expr: key_expr, ext_qos, - ext_tstamp: None, + ext_tstamp, ext_nodeid: ext::NodeIdType { node_id: context }, payload: payload.clone(), }) @@ -540,7 +541,7 @@ pub fn full_reentrant_route_data( outface.primitives.send_push(Push { wire_expr: key_expr.into(), ext_qos, - ext_tstamp: None, + ext_tstamp, ext_nodeid: ext::NodeIdType { node_id: *context }, payload: payload.clone(), }) diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 62eae0703e..bdb303d719 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -22,14 +22,21 @@ use std::collections::HashMap; use std::sync::{Arc, Weak}; use std::time::Duration; use tokio_util::sync::CancellationToken; +use zenoh_buffers::ZBuf; use zenoh_config::WhatAmI; use zenoh_protocol::core::key_expr::keyexpr; +use zenoh_protocol::core::KnownEncoding; use zenoh_protocol::network::declare::queryable::ext::QueryableInfo; +use zenoh_protocol::zenoh; +use zenoh_protocol::zenoh::ext::ValueType; use zenoh_protocol::{ core::{Encoding, WireExpr}, network::{ declare::ext, - request::{ext::TargetType, Request, RequestId}, + request::{ + ext::{BudgetType, TargetType, TimeoutType}, + Request, RequestId, + }, response::{self, ext::ResponderIdType, Response, ResponseFinal}, }, zenoh::{reply::ext::ConsolidationType, Reply, RequestBody, ResponseBody}, @@ -365,6 +372,7 @@ struct QueryCleanup { tables: Arc, face: Weak, qid: RequestId, + timeout: Duration, } impl QueryCleanup { @@ -378,6 +386,7 @@ impl QueryCleanup { tables: tables_ref.clone(), face: Arc::downgrade(face), qid, + timeout, }; if let Some((_, cancellation_token)) = face.pending_queries.get(&qid) { let c_cancellation_token = cancellation_token.clone(); @@ -396,17 +405,42 @@ impl QueryCleanup { impl Timed for QueryCleanup { async fn run(&mut self) { if let Some(mut face) = self.face.upgrade() { - let tables_lock = zwrite!(self.tables.tables); + let ext_respid = Some(response::ext::ResponderIdType { + zid: face.zid, + eid: 0, + }); + route_send_response( + &self.tables, + &mut face, + self.qid, + ext_respid, + WireExpr::empty(), + ResponseBody::Err(zenoh::Err { + timestamp: None, + is_infrastructure: false, + ext_sinfo: None, + ext_unknown: vec![], + ext_body: Some(ValueType { + #[cfg(feature = "shared-memory")] + ext_shm: None, + payload: ZBuf::from("Timeout".as_bytes().to_vec()), + encoding: KnownEncoding::TextPlain.into(), + }), + code: 0, // TODO + }), + ); + let queries_lock = zwrite!(self.tables.queries_lock); if let Some(query) = get_mut_unchecked(&mut face) .pending_queries .remove(&self.qid) { - drop(tables_lock); + drop(queries_lock); tracing::warn!( - "Didn't receive final reply {}:{} from {}: Timeout!", + "Didn't receive final reply {}:{} from {}: Timeout({:#?})!", query.0.src_face, self.qid, - face + face, + self.timeout, ); finalize_pending_query(query); } @@ -513,12 +547,15 @@ macro_rules! 
inc_res_stats { }; } +#[allow(clippy::too_many_arguments)] pub fn route_query( tables_ref: &Arc, face: &Arc, expr: &WireExpr, qid: RequestId, - target: TargetType, + ext_target: TargetType, + ext_budget: Option, + ext_timeout: Option, body: RequestBody, routing_context: NodeId, ) { @@ -555,14 +592,15 @@ pub fn route_query( }); let queries_lock = zwrite!(tables_ref.queries_lock); - let route = compute_final_route(&rtables, &route, face, &mut expr, &target, query); + let route = + compute_final_route(&rtables, &route, face, &mut expr, &ext_target, query); let local_replies = rtables .hat_code .compute_local_replies(&rtables, &prefix, expr.suffix, face); let zid = rtables.zid; - let timeout = rtables.queries_default_timeout; + let timeout = ext_timeout.unwrap_or(rtables.queries_default_timeout); drop(queries_lock); drop(rtables); @@ -643,8 +681,8 @@ pub fn route_query( ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: *context }, ext_target: *t, - ext_budget: None, - ext_timeout: None, + ext_budget, + ext_timeout, payload: body.clone(), }, expr.full_expr().to_string(), @@ -673,9 +711,9 @@ pub fn route_query( ext_qos: ext::QoSType::request_default(), ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: *context }, - ext_target: target, - ext_budget: None, - ext_timeout: None, + ext_target, + ext_budget, + ext_timeout, payload: body.clone(), }, expr.full_expr().to_string(), diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index 1b02a5964f..ebd6e66681 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -626,6 +626,7 @@ fn client_test() { &face0.upgrade().unwrap(), &"test/client/z1_wr1".into(), ext::QoSType::default(), + None, PushBody::Put(Put { timestamp: None, encoding: Encoding::default(), @@ -659,6 +660,7 @@ fn client_test() { &face0.upgrade().unwrap(), &WireExpr::from(11).with_suffix("/z1_wr2"), ext::QoSType::default(), + None, PushBody::Put(Put { timestamp: None, encoding: Encoding::default(), @@ -692,6 +694,7 @@ fn client_test() { &face1.upgrade().unwrap(), &"test/client/**".into(), ext::QoSType::default(), + None, PushBody::Put(Put { timestamp: None, encoding: Encoding::default(), @@ -725,6 +728,7 @@ fn client_test() { &face0.upgrade().unwrap(), &12.into(), ext::QoSType::default(), + None, PushBody::Put(Put { timestamp: None, encoding: Encoding::default(), @@ -758,6 +762,7 @@ fn client_test() { &face1.upgrade().unwrap(), &22.into(), ext::QoSType::default(), + None, PushBody::Put(Put { timestamp: None, encoding: Encoding::default(), From cfb86a81bef94c6867d03d710424f3ec8256abd2 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Mon, 13 May 2024 18:21:48 +0200 Subject: [PATCH 342/357] Enhance subscribers, queryables and liveliness tokens propagation to improve scalability (#814) * Router implements interests protocol for clients * Send WireExpr in UndeclareSubscriber/UndeclareQueryable to clients for pico * Fix WireExprExt M flag encoding/decoding * Fix decl_key * Clients send all samples and queries to routers and peers * Avoid self declaration loop on interest * Fix query/replies copy/paste bugs * Peers implement interests protocol for clients * Don't send WireExpr in UndeclareSubscriber/UndeclareQueryable to clients * Add client writer-side filtering (#863) * Add client writer-side filtering * Reimplement liveliness with interests * Fix writer-side filtering before receiving FinalInterest * Fix pubsub interest based routing after router failover * Declare message can be Push/Request/RequestContinuous/Response * Address 
review comments * Remove F: Future flag from DeclareInterest * cargo fmt --all * Remove unused Interest flags field * Update doc * Remove unneeded interest_id field * Update commons/zenoh-protocol/src/network/declare.rs * Remove unused UndeclareInterest * Implement proper Declare Request/Response id correlation * Add new Interest network message * Update doc * Update codec * Fix stable build * Fix test_acl * Fix writer side filtering * Add separate functions to compute matching status * Fix unstable imports * Remove useless checks --------- Co-authored-by: Luca Cominardi --- commons/zenoh-codec/src/network/declare.rs | 6 +- commons/zenoh-codec/src/network/interest.rs | 4 +- commons/zenoh-protocol/src/network/declare.rs | 13 + .../zenoh-protocol/src/network/interest.rs | 2 +- zenoh/src/api/builders/publication.rs | 29 +- zenoh/src/api/publication.rs | 37 +- zenoh/src/api/session.rs | 276 ++++++---- zenoh/src/api/subscriber.rs | 4 +- zenoh/src/net/primitives/demux.rs | 2 +- zenoh/src/net/primitives/mod.rs | 4 + zenoh/src/net/primitives/mux.rs | 50 ++ zenoh/src/net/routing/dispatcher/face.rs | 102 +++- zenoh/src/net/routing/dispatcher/pubsub.rs | 98 +++- zenoh/src/net/routing/dispatcher/queries.rs | 82 +++ zenoh/src/net/routing/dispatcher/resource.rs | 158 ++++-- zenoh/src/net/routing/hat/client/mod.rs | 3 + zenoh/src/net/routing/hat/client/pubsub.rs | 285 ++++++++-- zenoh/src/net/routing/hat/client/queries.rs | 57 +- .../src/net/routing/hat/linkstate_peer/mod.rs | 5 + .../net/routing/hat/linkstate_peer/pubsub.rs | 473 +++++++++++++---- .../net/routing/hat/linkstate_peer/queries.rs | 264 ++++++++-- zenoh/src/net/routing/hat/mod.rs | 60 ++- zenoh/src/net/routing/hat/p2p_peer/mod.rs | 5 + zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 390 +++++++++++--- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 340 +++++++++--- zenoh/src/net/routing/hat/router/mod.rs | 5 + zenoh/src/net/routing/hat/router/pubsub.rs | 488 ++++++++++++++---- zenoh/src/net/routing/hat/router/queries.rs | 271 ++++++++-- zenoh/src/net/runtime/adminspace.rs | 5 + zenoh/src/net/tests/tables.rs | 2 + 30 files changed, 2856 insertions(+), 664 deletions(-) diff --git a/commons/zenoh-codec/src/network/declare.rs b/commons/zenoh-codec/src/network/declare.rs index faffb04952..7c3b797d5d 100644 --- a/commons/zenoh-codec/src/network/declare.rs +++ b/commons/zenoh-codec/src/network/declare.rs @@ -958,7 +958,7 @@ where if x.wire_expr.has_suffix() { flags |= 1; } - if let Mapping::Receiver = wire_expr.mapping { + if let Mapping::Sender = wire_expr.mapping { flags |= 1 << 1; } codec.write(&mut zriter, flags)?; @@ -998,9 +998,9 @@ where String::new() }; let mapping = if imsg::has_flag(flags, 1 << 1) { - Mapping::Receiver - } else { Mapping::Sender + } else { + Mapping::Receiver }; Ok(( diff --git a/commons/zenoh-codec/src/network/interest.rs b/commons/zenoh-codec/src/network/interest.rs index 2deda7748a..5ebdc91f71 100644 --- a/commons/zenoh-codec/src/network/interest.rs +++ b/commons/zenoh-codec/src/network/interest.rs @@ -23,8 +23,8 @@ use zenoh_protocol::{ core::WireExpr, network::{ declare, id, - interest::{self, InterestMode, InterestOptions}, - Interest, Mapping, + interest::{self, Interest, InterestMode, InterestOptions}, + Mapping, }, }; diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index a5373cd5f4..d8c66559ce 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -178,6 +178,19 @@ pub mod common { pub mod ext { 
use super::*; + /// Flags: + /// - N: Named If N==1 then the key expr has name/suffix + /// - M: Mapping if M==1 then key expr mapping is the one declared by the sender, else it is the one declared by the receiver + /// + /// 7 6 5 4 3 2 1 0 + /// +-+-+-+-+-+-+-+-+ + /// |X|X|X|X|X|X|M|N| + /// +-+-+-+---------+ + /// ~ key_scope:z16 ~ + /// +---------------+ + /// ~ key_suffix ~ if N==1 -- + /// +---------------+ + /// pub type WireExprExt = zextzbuf!(0x0f, true); #[derive(Debug, Clone, PartialEq, Eq)] pub struct WireExprType { diff --git a/commons/zenoh-protocol/src/network/interest.rs b/commons/zenoh-protocol/src/network/interest.rs index 46797b72ee..b36080be28 100644 --- a/commons/zenoh-protocol/src/network/interest.rs +++ b/commons/zenoh-protocol/src/network/interest.rs @@ -121,7 +121,7 @@ pub mod flag { pub type DeclareRequestId = u32; pub type AtomicDeclareRequestId = AtomicU32; -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum InterestMode { Final, Current, diff --git a/zenoh/src/api/builders/publication.rs b/zenoh/src/api/builders/publication.rs index d4dc1b54d2..0b7bb01eae 100644 --- a/zenoh/src/api/builders/publication.rs +++ b/zenoh/src/api/builders/publication.rs @@ -314,8 +314,7 @@ impl<'a, 'b> PublisherBuilder<'a, 'b> { fn create_one_shot_publisher(self) -> ZResult> { Ok(Publisher { session: self.session, - #[cfg(feature = "unstable")] - eid: 0, // This is a one shot Publisher + id: 0, // This is a one shot Publisher key_expr: self.key_expr?, congestion_control: self.congestion_control, priority: self.priority, @@ -363,22 +362,16 @@ impl<'a, 'b> Wait for PublisherBuilder<'a, 'b> { } } self.session - .declare_publication_intent(key_expr.clone()) - .wait()?; - #[cfg(feature = "unstable")] - let eid = self.session.runtime.next_id(); - let publisher = Publisher { - session: self.session, - #[cfg(feature = "unstable")] - eid, - key_expr, - congestion_control: self.congestion_control, - priority: self.priority, - is_express: self.is_express, - destination: self.destination, - }; - tracing::trace!("publish({:?})", publisher.key_expr); - Ok(publisher) + .declare_publisher_inner(key_expr.clone(), self.destination) + .map(|id| Publisher { + session: self.session, + id, + key_expr, + congestion_control: self.congestion_control, + priority: self.priority, + is_express: self.is_express, + destination: self.destination, + }) } } diff --git a/zenoh/src/api/publication.rs b/zenoh/src/api/publication.rs index 553170e76a..d72f18739d 100644 --- a/zenoh/src/api/publication.rs +++ b/zenoh/src/api/publication.rs @@ -14,6 +14,7 @@ use std::{ convert::TryFrom, + fmt, future::{IntoFuture, Ready}, pin::Pin, task::{Context, Poll}, @@ -32,9 +33,7 @@ use zenoh_result::{Error, ZResult}; use { crate::api::handlers::{Callback, DefaultHandler, IntoHandler}, crate::api::sample::SourceInfo, - crate::api::Id, zenoh_protocol::core::EntityGlobalId, - zenoh_protocol::core::EntityId, }; use super::{ @@ -48,7 +47,23 @@ use super::{ sample::{DataInfo, Locality, QoS, Sample, SampleFields, SampleKind}, session::{SessionRef, Undeclarable}, }; -use crate::net::primitives::Primitives; +use crate::{api::Id, net::primitives::Primitives}; + +pub(crate) struct PublisherState { + pub(crate) id: Id, + pub(crate) remote_id: Id, + pub(crate) key_expr: KeyExpr<'static>, + pub(crate) destination: Locality, +} + +impl fmt::Debug for PublisherState { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Publisher") + .field("id", &self.id) + .field("key_expr", 
&self.key_expr) + .finish() + } +} #[zenoh_macros::unstable] #[derive(Clone)] @@ -113,8 +128,7 @@ impl std::fmt::Debug for PublisherRef<'_> { #[derive(Debug, Clone)] pub struct Publisher<'a> { pub(crate) session: SessionRef<'a>, - #[cfg(feature = "unstable")] - pub(crate) eid: EntityId, + pub(crate) id: Id, pub(crate) key_expr: KeyExpr<'a>, pub(crate) congestion_control: CongestionControl, pub(crate) priority: Priority, @@ -142,7 +156,7 @@ impl<'a> Publisher<'a> { pub fn id(&self) -> EntityGlobalId { EntityGlobalId { zid: self.session.zid(), - eid: self.eid, + eid: self.id, } } @@ -459,11 +473,9 @@ impl Resolvable for PublisherUndeclaration<'_> { impl Wait for PublisherUndeclaration<'_> { fn wait(mut self) -> ::To { let Publisher { - session, key_expr, .. + session, id: eid, .. } = &self.publisher; - session - .undeclare_publication_intent(key_expr.clone()) - .wait()?; + session.undeclare_publisher_inner(*eid)?; self.publisher.key_expr = unsafe { keyexpr::from_str_unchecked("") }.into(); Ok(()) } @@ -481,10 +493,7 @@ impl IntoFuture for PublisherUndeclaration<'_> { impl Drop for Publisher<'_> { fn drop(&mut self) { if !self.key_expr.is_empty() { - let _ = self - .session - .undeclare_publication_intent(self.key_expr.clone()) - .wait(); + let _ = self.session.undeclare_publisher_inner(self.id); } } } diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index 018a3a085e..e5087e693b 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -35,16 +35,19 @@ use zenoh_protocol::network::{declare::SubscriberId, ext}; use zenoh_protocol::{ core::{ key_expr::{keyexpr, OwnedKeyExpr}, - AtomicExprId, CongestionControl, ExprId, Reliability, WireExpr, ZenohId, EMPTY_EXPR_ID, + AtomicExprId, CongestionControl, EntityId, ExprId, Reliability, WireExpr, ZenohId, + EMPTY_EXPR_ID, }, network::{ + self, declare::{ self, common::ext::WireExprType, queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, DeclareQueryable, DeclareSubscriber, UndeclareQueryable, UndeclareSubscriber, }, + interest::{InterestMode, InterestOptions}, request::{self, ext::TargetType, Request}, - AtomicRequestId, Mapping, Push, RequestId, Response, ResponseFinal, + AtomicRequestId, Interest, Mapping, Push, RequestId, Response, ResponseFinal, }, zenoh::{ query::{self, ext::QueryBodyType, Consolidation}, @@ -68,7 +71,7 @@ use super::{ handlers::{Callback, DefaultHandler}, info::SessionInfo, key_expr::{KeyExpr, KeyExprInner}, - publication::Priority, + publication::{Priority, PublisherState}, query::{ConsolidationMode, GetBuilder, QueryConsolidation, QueryState, QueryTarget, Reply}, queryable::{Query, QueryInner, QueryableBuilder, QueryableState}, sample::{DataInfo, DataInfoIntoSample, Locality, QoS, Sample, SampleKind}, @@ -107,7 +110,7 @@ pub(crate) struct SessionState { pub(crate) remote_resources: HashMap, #[cfg(feature = "unstable")] pub(crate) remote_subscribers: HashMap>, - //pub(crate) publications: Vec, + pub(crate) publishers: HashMap, pub(crate) subscribers: HashMap>, pub(crate) queryables: HashMap>, #[cfg(feature = "unstable")] @@ -116,13 +119,13 @@ pub(crate) struct SessionState { pub(crate) matching_listeners: HashMap>, pub(crate) queries: HashMap, pub(crate) aggregated_subscribers: Vec, - //pub(crate) aggregated_publishers: Vec, + pub(crate) aggregated_publishers: Vec, } impl SessionState { pub(crate) fn new( aggregated_subscribers: Vec, - _aggregated_publishers: Vec, + aggregated_publishers: Vec, ) -> SessionState { SessionState { primitives: None, 
@@ -132,7 +135,7 @@ impl SessionState { remote_resources: HashMap::new(), #[cfg(feature = "unstable")] remote_subscribers: HashMap::new(), - //publications: Vec::new(), + publishers: HashMap::new(), subscribers: HashMap::new(), queryables: HashMap::new(), #[cfg(feature = "unstable")] @@ -141,7 +144,7 @@ impl SessionState { matching_listeners: HashMap::new(), queries: HashMap::new(), aggregated_subscribers, - //aggregated_publishers, + aggregated_publishers, } } } @@ -916,84 +919,99 @@ impl Session { }) } - /// Declare a publication for the given key expression. - /// - /// Puts that match the given key expression will only be sent on the network - /// if matching subscribers exist in the system. - /// - /// # Arguments - /// - /// * `key_expr` - The key expression to publish - pub(crate) fn declare_publication_intent<'a>( - &'a self, - _key_expr: KeyExpr<'a>, - ) -> impl Resolve> + 'a { - ResolveClosure::new(move || { - // tracing::trace!("declare_publication({:?})", key_expr); - // let mut state = zwrite!(self.state); - // if !state.publications.iter().any(|p| **p == **key_expr) { - // let declared_pub = if let Some(join_pub) = state - // .aggregated_publishers - // .iter() - // .find(|s| s.includes(&key_expr)) - // { - // let joined_pub = state.publications.iter().any(|p| join_pub.includes(p)); - // (!joined_pub).then(|| join_pub.clone().into()) - // } else { - // Some(key_expr.clone()) - // }; - // state.publications.push(key_expr.into()); - - // if let Some(res) = declared_pub { - // let primitives = state.primitives.as_ref().unwrap().clone(); - // drop(state); - // primitives.decl_publisher(&res.to_wire(self), None); - // } - // } - Ok(()) - }) + pub(crate) fn declare_publisher_inner( + &self, + key_expr: KeyExpr, + destination: Locality, + ) -> ZResult { + let mut state = zwrite!(self.state); + tracing::trace!("declare_publisher({:?})", key_expr); + let id = self.runtime.next_id(); + + let mut pub_state = PublisherState { + id, + remote_id: id, + key_expr: key_expr.clone().into_owned(), + destination, + }; + + let declared_pub = (destination != Locality::SessionLocal) + .then(|| { + match state + .aggregated_publishers + .iter() + .find(|s| s.includes(&key_expr)) + { + Some(join_pub) => { + if let Some(joined_pub) = state.publishers.values().find(|p| { + p.destination != Locality::SessionLocal + && join_pub.includes(&p.key_expr) + }) { + pub_state.remote_id = joined_pub.remote_id; + None + } else { + Some(join_pub.clone().into()) + } + } + None => { + if let Some(twin_pub) = state.publishers.values().find(|p| { + p.destination != Locality::SessionLocal && p.key_expr == key_expr + }) { + pub_state.remote_id = twin_pub.remote_id; + None + } else { + Some(key_expr.clone()) + } + } + } + }) + .flatten(); + + state.publishers.insert(id, pub_state); + + if let Some(res) = declared_pub { + let primitives = state.primitives.as_ref().unwrap().clone(); + drop(state); + primitives.send_interest(Interest { + id, + mode: InterestMode::CurrentFuture, + options: InterestOptions::KEYEXPRS + InterestOptions::SUBSCRIBERS, + wire_expr: Some(res.to_wire(self).to_owned()), + ext_qos: network::ext::QoSType::DEFAULT, + ext_tstamp: None, + ext_nodeid: network::ext::NodeIdType::DEFAULT, + }); + } + Ok(id) } - /// Undeclare a publication previously declared - /// with [`declare_publication`](Session::declare_publication). 
- /// - /// # Arguments - /// - /// * `key_expr` - The key expression of the publication to undeclarte - pub(crate) fn undeclare_publication_intent<'a>( - &'a self, - _key_expr: KeyExpr<'a>, - ) -> impl Resolve> + 'a { - ResolveClosure::new(move || { - // let mut state = zwrite!(self.state); - // if let Some(idx) = state.publications.iter().position(|p| **p == *key_expr) { - // trace!("undeclare_publication({:?})", key_expr); - // state.publications.remove(idx); - // match state - // .aggregated_publishers - // .iter() - // .find(|s| s.includes(&key_expr)) - // { - // Some(join_pub) => { - // let joined_pub = state.publications.iter().any(|p| join_pub.includes(p)); - // if !joined_pub { - // let primitives = state.primitives.as_ref().unwrap().clone(); - // let key_expr = WireExpr::from(join_pub).to_owned(); - // drop(state); - // primitives.forget_publisher(&key_expr, None); - // } - // } - // None => { - // let primitives = state.primitives.as_ref().unwrap().clone(); - // drop(state); - // primitives.forget_publisher(&key_expr.to_wire(self), None); - // } - // }; - // } else { - // bail!("Unable to find publication") - // } + pub(crate) fn undeclare_publisher_inner(&self, pid: Id) -> ZResult<()> { + let mut state = zwrite!(self.state); + if let Some(pub_state) = state.publishers.remove(&pid) { + trace!("undeclare_publisher({:?})", pub_state); + if pub_state.destination != Locality::SessionLocal { + // Note: there might be several publishers on the same KeyExpr. + // Before calling forget_publishers(key_expr), check if this was the last one. + if !state.publishers.values().any(|p| { + p.destination != Locality::SessionLocal && p.remote_id == pub_state.remote_id + }) { + let primitives = state.primitives.as_ref().unwrap().clone(); + drop(state); + primitives.send_interest(Interest { + id: pub_state.remote_id, + mode: InterestMode::Final, + options: InterestOptions::empty(), + wire_expr: None, + ext_qos: declare::ext::QoSType::DEFAULT, + ext_tstamp: None, + ext_nodeid: declare::ext::NodeIdType::DEFAULT, + }); + } + } Ok(()) - }) + } else { + Err(zerror!("Unable to find publisher").into()) + } } pub(crate) fn declare_subscriber_inner( @@ -1005,7 +1023,7 @@ impl Session { info: &SubscriberInfo, ) -> ZResult> { let mut state = zwrite!(self.state); - tracing::trace!("subscribe({:?})", key_expr); + tracing::trace!("declare_subscriber({:?})", key_expr); let id = self.runtime.next_id(); let key_expr = match scope { Some(scope) => scope / key_expr, @@ -1126,15 +1144,34 @@ impl Session { let state = zread!(self.state); self.update_status_up(&state, &key_expr) } + } else { + #[cfg(feature = "unstable")] + if key_expr + .as_str() + .starts_with(crate::api::liveliness::PREFIX_LIVELINESS) + { + let primitives = state.primitives.as_ref().unwrap().clone(); + drop(state); + + primitives.send_interest(Interest { + id, + mode: InterestMode::CurrentFuture, + options: InterestOptions::KEYEXPRS + InterestOptions::SUBSCRIBERS, + wire_expr: Some(key_expr.to_wire(self).to_owned()), + ext_qos: network::ext::QoSType::DEFAULT, + ext_tstamp: None, + ext_nodeid: network::ext::NodeIdType::DEFAULT, + }); + } } Ok(sub_state) } - pub(crate) fn unsubscribe(&self, sid: Id) -> ZResult<()> { + pub(crate) fn undeclare_subscriber_inner(&self, sid: Id) -> ZResult<()> { let mut state = zwrite!(self.state); if let Some(sub_state) = state.subscribers.remove(&sid) { - trace!("unsubscribe({:?})", sub_state); + trace!("undeclare_subscriber({:?})", sub_state); for res in state .local_resources .values_mut() @@ -1184,6 +1221,26 @@ impl 
Session { self.update_status_down(&state, &sub_state.key_expr) } } + } else { + #[cfg(feature = "unstable")] + if sub_state + .key_expr + .as_str() + .starts_with(crate::api::liveliness::PREFIX_LIVELINESS) + { + let primitives = state.primitives.as_ref().unwrap().clone(); + drop(state); + + primitives.send_interest(Interest { + id: sub_state.id, + mode: InterestMode::Final, + options: InterestOptions::empty(), + wire_expr: None, + ext_qos: declare::ext::QoSType::DEFAULT, + ext_tstamp: None, + ext_nodeid: declare::ext::NodeIdType::DEFAULT, + }); + } } Ok(()) } else { @@ -1199,7 +1256,7 @@ impl Session { callback: Callback<'static, Query>, ) -> ZResult> { let mut state = zwrite!(self.state); - tracing::trace!("queryable({:?})", key_expr); + tracing::trace!("declare_queryable({:?})", key_expr); let id = self.runtime.next_id(); let qable_state = Arc::new(QueryableState { id, @@ -1236,7 +1293,7 @@ impl Session { pub(crate) fn close_queryable(&self, qid: Id) -> ZResult<()> { let mut state = zwrite!(self.state); if let Some(qable_state) = state.queryables.remove(&qid) { - trace!("close_queryable({:?})", qable_state); + trace!("undeclare_queryable({:?})", qable_state); if qable_state.origin != Locality::SessionLocal { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); @@ -1358,33 +1415,29 @@ impl Session { key_expr: &KeyExpr, destination: Locality, ) -> ZResult { - use crate::net::routing::dispatcher::tables::RoutingExpr; let router = self.runtime.router(); let tables = zread!(router.tables.tables); - let res = crate::net::routing::dispatcher::resource::Resource::get_resource( - &tables.root_res, - key_expr.as_str(), - ); - let route = crate::net::routing::dispatcher::pubsub::get_local_data_route( - &tables, - &res, - &mut RoutingExpr::new(&tables.root_res, key_expr.as_str()), - ); + let matching_subscriptions = + crate::net::routing::dispatcher::pubsub::get_matching_subscriptions(&tables, key_expr); drop(tables); let matching = match destination { - Locality::Any => !route.is_empty(), + Locality::Any => !matching_subscriptions.is_empty(), Locality::Remote => { if let Some(face) = zread!(self.state).primitives.as_ref() { - route.values().any(|dir| !Arc::ptr_eq(&dir.0, &face.state)) + matching_subscriptions + .values() + .any(|dir| !Arc::ptr_eq(dir, &face.state)) } else { - !route.is_empty() + !matching_subscriptions.is_empty() } } Locality::SessionLocal => { if let Some(face) = zread!(self.state).primitives.as_ref() { - route.values().any(|dir| Arc::ptr_eq(&dir.0, &face.state)) + matching_subscriptions + .values() + .any(|dir| Arc::ptr_eq(dir, &face.state)) } else { false } @@ -2070,7 +2123,7 @@ impl Primitives for Session { }; self.handle_data( false, - &m.ext_wire_expr.wire_expr, + &expr.to_wire(self), Some(data_info), ZBuf::default(), #[cfg(feature = "unstable")] @@ -2088,9 +2141,15 @@ impl Primitives for Session { zenoh_protocol::network::DeclareBody::UndeclareQueryable(m) => { trace!("recv UndeclareQueryable {:?}", m.id); } - DeclareBody::DeclareToken(_) => todo!(), - DeclareBody::UndeclareToken(_) => todo!(), - DeclareBody::DeclareFinal(_) => todo!(), + DeclareBody::DeclareToken(m) => { + trace!("recv DeclareToken {:?}", m.id); + } + DeclareBody::UndeclareToken(m) => { + trace!("recv UndeclareToken {:?}", m.id); + } + DeclareBody::DeclareFinal(_) => { + trace!("recv DeclareFinal {:?}", msg.interest_id); + } } } @@ -2585,6 +2644,11 @@ pub trait SessionDeclarations<'s, 'a> { } impl crate::net::primitives::EPrimitives for Session { + #[inline] + fn send_interest(&self, 
ctx: crate::net::routing::RoutingContext) { + (self as &dyn Primitives).send_interest(ctx.msg) + } + #[inline] fn send_declare(&self, ctx: crate::net::routing::RoutingContext) { (self as &dyn Primitives).send_declare(ctx.msg) diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs index ba345f5116..a0cfd51811 100644 --- a/zenoh/src/api/subscriber.rs +++ b/zenoh/src/api/subscriber.rs @@ -145,7 +145,7 @@ impl Wait for SubscriberUndeclaration<'_> { self.subscriber.alive = false; self.subscriber .session - .unsubscribe(self.subscriber.state.id) + .undeclare_subscriber_inner(self.subscriber.state.id) } } @@ -161,7 +161,7 @@ impl IntoFuture for SubscriberUndeclaration<'_> { impl Drop for SubscriberInner<'_> { fn drop(&mut self) { if self.alive { - let _ = self.session.unsubscribe(self.state.id); + let _ = self.session.undeclare_subscriber_inner(self.state.id); } } } diff --git a/zenoh/src/net/primitives/demux.rs b/zenoh/src/net/primitives/demux.rs index b400d1a254..56bbbe4570 100644 --- a/zenoh/src/net/primitives/demux.rs +++ b/zenoh/src/net/primitives/demux.rs @@ -68,7 +68,7 @@ impl TransportPeerEventHandler for DeMux { match msg.body { NetworkBody::Push(m) => self.face.send_push(m), NetworkBody::Declare(m) => self.face.send_declare(m), - NetworkBody::Interest(_) => todo!(), + NetworkBody::Interest(m) => self.face.send_interest(m), NetworkBody::Request(m) => self.face.send_request(m), NetworkBody::Response(m) => self.face.send_response(m), NetworkBody::ResponseFinal(m) => self.face.send_response_final(m), diff --git a/zenoh/src/net/primitives/mod.rs b/zenoh/src/net/primitives/mod.rs index d3aa8097ca..dbdcdd26f8 100644 --- a/zenoh/src/net/primitives/mod.rs +++ b/zenoh/src/net/primitives/mod.rs @@ -43,6 +43,8 @@ pub trait Primitives: Send + Sync { pub(crate) trait EPrimitives: Send + Sync { fn as_any(&self) -> &dyn Any; + fn send_interest(&self, ctx: RoutingContext); + fn send_declare(&self, ctx: RoutingContext); fn send_push(&self, msg: Push); @@ -76,6 +78,8 @@ impl Primitives for DummyPrimitives { } impl EPrimitives for DummyPrimitives { + fn send_interest(&self, _ctx: RoutingContext) {} + fn send_declare(&self, _ctx: RoutingContext) {} fn send_push(&self, _msg: Push) {} diff --git a/zenoh/src/net/primitives/mux.rs b/zenoh/src/net/primitives/mux.rs index df292b4315..f58b4550d0 100644 --- a/zenoh/src/net/primitives/mux.rs +++ b/zenoh/src/net/primitives/mux.rs @@ -197,6 +197,31 @@ impl Primitives for Mux { } impl EPrimitives for Mux { + fn send_interest(&self, ctx: RoutingContext) { + let ctx = RoutingContext { + msg: NetworkMessage { + body: NetworkBody::Interest(ctx.msg), + #[cfg(feature = "stats")] + size: None, + }, + inface: ctx.inface, + outface: ctx.outface, + prefix: ctx.prefix, + full_expr: ctx.full_expr, + }; + let prefix = ctx + .wire_expr() + .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) + .flatten() + .cloned(); + let cache = prefix + .as_ref() + .and_then(|p| p.get_egress_cache(ctx.outface.get().unwrap())); + if let Some(ctx) = self.interceptor.intercept(ctx, cache) { + let _ = self.handler.schedule(ctx.msg); + } + } + fn send_declare(&self, ctx: RoutingContext) { let ctx = RoutingContext { msg: NetworkMessage { @@ -497,6 +522,31 @@ impl Primitives for McastMux { } impl EPrimitives for McastMux { + fn send_interest(&self, ctx: RoutingContext) { + let ctx = RoutingContext { + msg: NetworkMessage { + body: NetworkBody::Interest(ctx.msg), + #[cfg(feature = "stats")] + size: None, + }, + inface: ctx.inface, + outface: ctx.outface, + prefix: ctx.prefix, + 
full_expr: ctx.full_expr, + }; + let prefix = ctx + .wire_expr() + .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) + .flatten() + .cloned(); + let cache = prefix + .as_ref() + .and_then(|p| p.get_egress_cache(ctx.outface.get().unwrap())); + if let Some(ctx) = self.interceptor.intercept(ctx, cache) { + let _ = self.handler.schedule(ctx.msg); + } + } + fn send_declare(&self, ctx: RoutingContext) { let ctx = RoutingContext { msg: NetworkMessage { diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index c5129f76e2..4669433145 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -21,7 +21,12 @@ use std::{ use tokio_util::sync::CancellationToken; use zenoh_protocol::{ core::{ExprId, WhatAmI, ZenohId}, - network::{Mapping, Push, Request, RequestId, Response, ResponseFinal}, + network::{ + declare::ext, + interest::{InterestId, InterestMode, InterestOptions}, + Declare, DeclareBody, DeclareFinal, Mapping, Push, Request, RequestId, Response, + ResponseFinal, + }, zenoh::RequestBody, }; use zenoh_sync::get_mut_unchecked; @@ -35,10 +40,19 @@ use crate::{ api::key_expr::KeyExpr, net::{ primitives::{McastMux, Mux, Primitives}, - routing::interceptor::{InterceptorTrait, InterceptorsChain}, + routing::{ + interceptor::{InterceptorTrait, InterceptorsChain}, + RoutingContext, + }, }, }; +pub(crate) struct InterestState { + pub(crate) options: InterestOptions, + pub(crate) res: Option>, + pub(crate) finalized: bool, +} + pub struct FaceState { pub(crate) id: usize, pub(crate) zid: ZenohId, @@ -46,6 +60,8 @@ pub struct FaceState { #[cfg(feature = "stats")] pub(crate) stats: Option>, pub(crate) primitives: Arc, + pub(crate) local_interests: HashMap, + pub(crate) remote_key_interests: HashMap>>, pub(crate) local_mappings: HashMap>, pub(crate) remote_mappings: HashMap>, pub(crate) next_qid: RequestId, @@ -75,6 +91,8 @@ impl FaceState { #[cfg(feature = "stats")] stats, primitives, + local_interests: HashMap::new(), + remote_key_interests: HashMap::new(), local_mappings: HashMap::new(), remote_mappings: HashMap::new(), next_qid: 0, @@ -191,8 +209,67 @@ impl Face { } impl Primitives for Face { - fn send_interest(&self, _msg: zenoh_protocol::network::Interest) { - todo!() + fn send_interest(&self, msg: zenoh_protocol::network::Interest) { + let ctrl_lock = zlock!(self.tables.ctrl_lock); + if msg.mode != InterestMode::Final { + if msg.options.keyexprs() && msg.mode != InterestMode::Current { + register_expr_interest( + &self.tables, + &mut self.state.clone(), + msg.id, + msg.wire_expr.as_ref(), + ); + } + if msg.options.subscribers() { + declare_sub_interest( + ctrl_lock.as_ref(), + &self.tables, + &mut self.state.clone(), + msg.id, + msg.wire_expr.as_ref(), + msg.mode, + msg.options.aggregate(), + ); + } + if msg.options.queryables() { + declare_qabl_interest( + ctrl_lock.as_ref(), + &self.tables, + &mut self.state.clone(), + msg.id, + msg.wire_expr.as_ref(), + msg.mode, + msg.options.aggregate(), + ); + } + if msg.mode != InterestMode::Future { + self.state.primitives.send_declare(RoutingContext::new_out( + Declare { + interest_id: Some(msg.id), + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareFinal(DeclareFinal), + }, + self.clone(), + )); + } + } else { + unregister_expr_interest(&self.tables, &mut self.state.clone(), msg.id); + undeclare_sub_interest( + ctrl_lock.as_ref(), + &self.tables, + &mut self.state.clone(), + msg.id, + ); + 
undeclare_qabl_interest( + ctrl_lock.as_ref(), + &self.tables, + &mut self.state.clone(), + msg.id, + ); + } + drop(ctrl_lock); } fn send_declare(&self, msg: zenoh_protocol::network::Declare) { @@ -246,9 +323,20 @@ impl Primitives for Face { msg.ext_nodeid.node_id, ); } - zenoh_protocol::network::DeclareBody::DeclareToken(_m) => todo!(), - zenoh_protocol::network::DeclareBody::UndeclareToken(_m) => todo!(), - zenoh_protocol::network::DeclareBody::DeclareFinal(_) => todo!(), + zenoh_protocol::network::DeclareBody::DeclareToken(m) => { + tracing::warn!("Received unsupported {m:?}") + } + zenoh_protocol::network::DeclareBody::UndeclareToken(m) => { + tracing::warn!("Received unsupported {m:?}") + } + zenoh_protocol::network::DeclareBody::DeclareFinal(_) => { + if let Some(id) = msg.interest_id { + get_mut_unchecked(&mut self.state.clone()) + .local_interests + .entry(id) + .and_modify(|interest| interest.finalized = true); + } + } } drop(ctrl_lock); } diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index 94c6f7b1a6..4e69e45dc3 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -18,6 +18,7 @@ use zenoh_protocol::{ core::{key_expr::keyexpr, WhatAmI, WireExpr}, network::{ declare::{ext, subscriber::ext::SubscriberInfo, SubscriberId}, + interest::{InterestId, InterestMode}, Push, }, zenoh::PushBody, @@ -29,8 +30,90 @@ use super::{ resource::{DataRoutes, Direction, Resource}, tables::{NodeId, Route, RoutingExpr, Tables, TablesLock}, }; +#[zenoh_macros::unstable] +use crate::key_expr::KeyExpr; use crate::net::routing::hat::HatTrait; +pub(crate) fn declare_sub_interest( + hat_code: &(dyn HatTrait + Send + Sync), + tables: &TablesLock, + face: &mut Arc, + id: InterestId, + expr: Option<&WireExpr>, + mode: InterestMode, + aggregate: bool, +) { + if let Some(expr) = expr { + let rtables = zread!(tables.tables); + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + tracing::debug!( + "{} Declare sub interest {} ({}{})", + face, + id, + prefix.expr(), + expr.suffix + ); + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = if res + .as_ref() + .map(|r| r.context.is_some()) + .unwrap_or(false) + { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + + hat_code.declare_sub_interest( + &mut wtables, + face, + id, + Some(&mut res), + mode, + aggregate, + ); + } + None => tracing::error!( + "{} Declare sub interest {} for unknown scope {}!", + face, + id, + expr.scope + ), + } + } else { + let mut wtables = zwrite!(tables.tables); + hat_code.declare_sub_interest(&mut wtables, face, id, None, mode, aggregate); + } +} + +pub(crate) fn undeclare_sub_interest( + hat_code: &(dyn HatTrait + Send + Sync), + tables: &TablesLock, + face: &mut Arc, + id: InterestId, +) { + tracing::debug!("{} Undeclare sub interest {}", face, id,); + let mut wtables = zwrite!(tables.tables); + hat_code.undeclare_sub_interest(&mut 
wtables, face, id); +} + pub(crate) fn declare_subscription( hat_code: &(dyn HatTrait + Send + Sync), tables: &TablesLock, @@ -329,18 +412,11 @@ fn get_data_route( #[zenoh_macros::unstable] #[inline] -pub(crate) fn get_local_data_route( +pub(crate) fn get_matching_subscriptions( tables: &Tables, - res: &Option>, - expr: &mut RoutingExpr, -) -> Arc { - res.as_ref() - .and_then(|res| res.data_route(WhatAmI::Client, 0)) - .unwrap_or_else(|| { - tables - .hat_code - .compute_data_route(tables, expr, 0, WhatAmI::Client) - }) + key_expr: &KeyExpr<'_>, +) -> HashMap> { + tables.hat_code.get_matching_subscriptions(tables, key_expr) } #[cfg(feature = "stats")] diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 2bbc924e0b..23e405c3c4 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -24,6 +24,7 @@ use zenoh_protocol::{ core::{key_expr::keyexpr, Encoding, WireExpr}, network::{ declare::{ext, queryable::ext::QueryableInfoType, QueryableId}, + interest::{InterestId, InterestMode}, request::{ext::TargetType, Request, RequestId}, response::{self, ext::ResponderIdType, Response, ResponseFinal}, }, @@ -39,6 +40,87 @@ use super::{ }; use crate::net::routing::{hat::HatTrait, RoutingContext}; +#[allow(clippy::too_many_arguments)] // TODO refactor +pub(crate) fn declare_qabl_interest( + hat_code: &(dyn HatTrait + Send + Sync), + tables: &TablesLock, + face: &mut Arc, + id: InterestId, + expr: Option<&WireExpr>, + mode: InterestMode, + aggregate: bool, +) { + if let Some(expr) = expr { + let rtables = zread!(tables.tables); + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + tracing::debug!( + "{} Declare qabl interest {} ({}{})", + face, + id, + prefix.expr(), + expr.suffix + ); + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = if res + .as_ref() + .map(|r| r.context.is_some()) + .unwrap_or(false) + { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + + hat_code.declare_qabl_interest( + &mut wtables, + face, + id, + Some(&mut res), + mode, + aggregate, + ); + } + None => tracing::error!( + "{} Declare qabl interest {} for unknown scope {}!", + face, + id, + expr.scope + ), + } + } else { + let mut wtables = zwrite!(tables.tables); + hat_code.declare_qabl_interest(&mut wtables, face, id, None, mode, aggregate); + } +} + +pub(crate) fn undeclare_qabl_interest( + hat_code: &(dyn HatTrait + Send + Sync), + tables: &TablesLock, + face: &mut Arc, + id: InterestId, +) { + tracing::debug!("{} Undeclare qabl interest {}", face, id,); + let mut wtables = zwrite!(tables.tables); + hat_code.undeclare_qabl_interest(&mut wtables, face, id); +} + pub(crate) struct Query { src_face: Arc, src_qid: RequestId, diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index d8765e16ae..e6b13dc2c8 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ 
b/zenoh/src/net/routing/dispatcher/resource.rs @@ -27,6 +27,7 @@ use zenoh_protocol::{ ext, queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, }, + interest::InterestId, Mapping, RequestId, }, }; @@ -60,6 +61,20 @@ pub(crate) struct SessionContext { pub(crate) e_interceptor_cache: Option>, } +impl SessionContext { + pub(crate) fn new(face: Arc) -> Self { + Self { + face, + local_expr_id: None, + remote_expr_id: None, + subs: None, + qabl: None, + in_interceptor_cache: None, + e_interceptor_cache: None, + } + } +} + #[derive(Default)] pub(crate) struct RoutesIndexes { pub(crate) routers: Vec, @@ -217,6 +232,16 @@ impl Resource { self.context.as_mut().unwrap() } + #[inline(always)] + pub(crate) fn matches(&self, other: &Arc) -> bool { + self.context + .as_ref() + .unwrap() + .matches + .iter() + .any(|m| m.upgrade().is_some_and(|m| &m == other)) + } + pub fn nonwild_prefix(res: &Arc) -> (Option>, String) { match &res.nonwild_prefix { None => (Some(res.clone()), "".to_string()), @@ -434,34 +459,34 @@ impl Resource { let (nonwild_prefix, wildsuffix) = Resource::nonwild_prefix(res); match nonwild_prefix { Some(mut nonwild_prefix) => { - let ctx = get_mut_unchecked(&mut nonwild_prefix) + if let Some(ctx) = get_mut_unchecked(&mut nonwild_prefix) .session_ctxs - .entry(face.id) - .or_insert_with(|| { - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: None, - qabl: None, - in_interceptor_cache: None, - e_interceptor_cache: None, - }) - }); - - if let Some(expr_id) = ctx.remote_expr_id { - WireExpr { - scope: expr_id, - suffix: wildsuffix.into(), - mapping: Mapping::Receiver, + .get(&face.id) + { + if let Some(expr_id) = ctx.remote_expr_id { + return WireExpr { + scope: expr_id, + suffix: wildsuffix.into(), + mapping: Mapping::Receiver, + }; } - } else if let Some(expr_id) = ctx.local_expr_id { - WireExpr { - scope: expr_id, - suffix: wildsuffix.into(), - mapping: Mapping::Sender, + if let Some(expr_id) = ctx.local_expr_id { + return WireExpr { + scope: expr_id, + suffix: wildsuffix.into(), + mapping: Mapping::Sender, + }; } - } else { + } + if face.remote_key_interests.values().any(|res| { + res.as_ref() + .map(|res| res.matches(&nonwild_prefix)) + .unwrap_or(true) + }) { + let ctx = get_mut_unchecked(&mut nonwild_prefix) + .session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))); let expr_id = face.get_next_local_id(); get_mut_unchecked(ctx).local_expr_id = Some(expr_id); get_mut_unchecked(face) @@ -486,6 +511,8 @@ impl Resource { suffix: wildsuffix.into(), mapping: Mapping::Sender, } + } else { + res.expr().into() } } None => wildsuffix.into(), @@ -650,7 +677,7 @@ impl Resource { } } -pub fn register_expr( +pub(crate) fn register_expr( tables: &TablesLock, face: &mut Arc, expr_id: ExprId, @@ -697,20 +724,12 @@ pub fn register_expr( Resource::match_resource(&wtables, &mut res, matches); (res, wtables) }; - get_mut_unchecked(&mut res) + let ctx = get_mut_unchecked(&mut res) .session_ctxs .entry(face.id) - .or_insert_with(|| { - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: Some(expr_id), - subs: None, - qabl: None, - in_interceptor_cache: None, - e_interceptor_cache: None, - }) - }); + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))); + + get_mut_unchecked(ctx).remote_expr_id = Some(expr_id); get_mut_unchecked(face) .remote_mappings @@ -728,7 +747,7 @@ pub fn register_expr( } } -pub fn 
unregister_expr(tables: &TablesLock, face: &mut Arc, expr_id: ExprId) { +pub(crate) fn unregister_expr(tables: &TablesLock, face: &mut Arc, expr_id: ExprId) { let wtables = zwrite!(tables.tables); match get_mut_unchecked(face).remote_mappings.remove(&expr_id) { Some(mut res) => Resource::clean(&mut res), @@ -736,3 +755,64 @@ pub fn unregister_expr(tables: &TablesLock, face: &mut Arc, expr_id: } drop(wtables); } + +pub(crate) fn register_expr_interest( + tables: &TablesLock, + face: &mut Arc, + id: InterestId, + expr: Option<&WireExpr>, +) { + if let Some(expr) = expr { + let rtables = zread!(tables.tables); + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + let res = Resource::get_resource(&prefix, &expr.suffix); + let (res, wtables) = if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + get_mut_unchecked(face) + .remote_key_interests + .insert(id, Some(res)); + drop(wtables); + } + None => tracing::error!( + "Declare keyexpr interest with unknown scope {}!", + expr.scope + ), + } + } else { + let wtables = zwrite!(tables.tables); + get_mut_unchecked(face) + .remote_key_interests + .insert(id, None); + drop(wtables); + } +} + +pub(crate) fn unregister_expr_interest( + tables: &TablesLock, + face: &mut Arc, + id: InterestId, +) { + let wtables = zwrite!(tables.tables); + get_mut_unchecked(face).remote_key_interests.remove(&id); + drop(wtables); +} diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index 3b4e7c7103..921dc7554c 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -26,6 +26,7 @@ use std::{ use zenoh_config::WhatAmI; use zenoh_protocol::network::{ declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, + interest::InterestId, Oam, }; use zenoh_result::ZResult; @@ -285,6 +286,7 @@ impl HatContext { struct HatFace { next_id: AtomicU32, // @TODO: manage rollover and uniqueness + remote_sub_interests: HashMap>>, local_subs: HashMap, SubscriberId>, remote_subs: HashMap>, local_qabls: HashMap, (QueryableId, QueryableInfoType)>, @@ -295,6 +297,7 @@ impl HatFace { fn new() -> Self { Self { next_id: AtomicU32::new(0), + remote_sub_interests: HashMap::new(), local_subs: HashMap::new(), remote_subs: HashMap::new(), local_qabls: HashMap::new(), diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 3334fbfb14..a87a4e7f1e 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -19,23 +19,30 @@ use std::{ use zenoh_protocol::{ core::{key_expr::OwnedKeyExpr, Reliability, WhatAmI}, - network::declare::{ - common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, SubscriberId, UndeclareSubscriber, + network::{ + declare::{ + common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, + 
DeclareSubscriber, SubscriberId, UndeclareSubscriber, + }, + interest::{InterestId, InterestMode, InterestOptions}, + Interest, }, }; use zenoh_sync::get_mut_unchecked; use super::{face_hat, face_hat_mut, get_routes_entries, HatCode, HatFace}; -use crate::net::routing::{ - dispatcher::{ - face::FaceState, - resource::{NodeId, Resource, SessionContext}, - tables::{Route, RoutingExpr, Tables}, +use crate::{ + key_expr::KeyExpr, + net::routing::{ + dispatcher::{ + face::{FaceState, InterestState}, + resource::{NodeId, Resource, SessionContext}, + tables::{Route, RoutingExpr, Tables}, + }, + hat::{HatPubSubTrait, Sources}, + router::{update_data_routes_from, RoutesIndexes}, + RoutingContext, PREFIX_LIVELINESS, }, - hat::{HatPubSubTrait, Sources}, - router::RoutesIndexes, - RoutingContext, PREFIX_LIVELINESS, }; #[inline] @@ -104,18 +111,11 @@ fn register_client_subscription( } } None => { - res.session_ctxs.insert( - face.id, - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: Some(*sub_info), - qabl: None, - in_interceptor_cache: None, - e_interceptor_cache: None, - }), - ); + let ctx = res + .session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))); + get_mut_unchecked(ctx).subs = Some(*sub_info); } } } @@ -243,7 +243,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers }; - for src_face in tables + for mut src_face in tables .faces .values() .cloned() @@ -252,10 +252,134 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { for sub in face_hat!(src_face).remote_subs.values() { propagate_simple_subscription_to(tables, face, sub, &sub_info, &mut src_face.clone()); } + if face.whatami != WhatAmI::Client { + for res in face_hat_mut!(&mut src_face).remote_sub_interests.values() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + let options = InterestOptions::KEYEXPRS + InterestOptions::SUBSCRIBERS; + get_mut_unchecked(face).local_interests.insert( + id, + InterestState { + options, + res: res.as_ref().map(|res| (*res).clone()), + finalized: false, + }, + ); + let wire_expr = res.as_ref().map(|res| Resource::decl_key(res, face)); + face.primitives.send_interest(RoutingContext::with_expr( + Interest { + id, + mode: InterestMode::CurrentFuture, + options, + wire_expr, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + }, + res.as_ref().map(|res| res.expr()).unwrap_or_default(), + )); + } + } } + // recompute routes + update_data_routes_from(tables, &mut tables.root_res.clone()); } impl HatPubSubTrait for HatCode { + fn declare_sub_interest( + &self, + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + _aggregate: bool, + ) { + face_hat_mut!(face) + .remote_sub_interests + .insert(id, res.as_ref().map(|res| (*res).clone())); + for dst_face in tables + .faces + .values_mut() + .filter(|f| f.whatami != WhatAmI::Client) + { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + let options = InterestOptions::KEYEXPRS + InterestOptions::SUBSCRIBERS; + get_mut_unchecked(dst_face).local_interests.insert( + id, + InterestState { + options, + res: res.as_ref().map(|res| (*res).clone()), + finalized: mode == InterestMode::Future, + }, + ); + let wire_expr = res.as_ref().map(|res| 
Resource::decl_key(res, dst_face)); + dst_face.primitives.send_interest(RoutingContext::with_expr( + Interest { + id, + mode, + options, + wire_expr, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + }, + res.as_ref().map(|res| res.expr()).unwrap_or_default(), + )); + } + } + + fn undeclare_sub_interest( + &self, + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + ) { + if let Some(interest) = face_hat_mut!(face).remote_sub_interests.remove(&id) { + if !tables.faces.values().any(|f| { + f.whatami == WhatAmI::Client + && face_hat!(f) + .remote_sub_interests + .values() + .any(|i| *i == interest) + }) { + for dst_face in tables + .faces + .values_mut() + .filter(|f| f.whatami != WhatAmI::Client) + { + for id in dst_face + .local_interests + .keys() + .cloned() + .collect::>() + { + let local_interest = dst_face.local_interests.get(&id).unwrap(); + if local_interest.options.subscribers() && (local_interest.res == interest) + { + dst_face.primitives.send_interest(RoutingContext::with_expr( + Interest { + id, + mode: InterestMode::Final, + options: InterestOptions::empty(), + wire_expr: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + }, + local_interest + .res + .as_ref() + .map(|res| res.expr()) + .unwrap_or_default(), + )); + get_mut_unchecked(dst_face).local_interests.remove(&id); + } + } + } + } + } + } + fn declare_subscription( &self, tables: &mut Tables, @@ -322,6 +446,51 @@ impl HatPubSubTrait for HatCode { return Arc::new(route); } }; + + for face in tables + .faces + .values() + .filter(|f| f.whatami != WhatAmI::Client) + { + if face.local_interests.values().any(|interest| { + interest.finalized + && interest.options.subscribers() + && interest + .res + .as_ref() + .map(|res| { + KeyExpr::try_from(res.expr()) + .and_then(|intres| { + KeyExpr::try_from(expr.full_expr()) + .map(|putres| intres.includes(&putres)) + }) + .unwrap_or(false) + }) + .unwrap_or(true) + }) { + if face_hat!(face).remote_subs.values().any(|sub| { + KeyExpr::try_from(sub.expr()) + .and_then(|subres| { + KeyExpr::try_from(expr.full_expr()) + .map(|putres| subres.intersects(&putres)) + }) + .unwrap_or(false) + }) { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.insert( + face.id, + (face.clone(), key_expr.to_owned(), NodeId::default()), + ); + } + } else { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.insert( + face.id, + (face.clone(), key_expr.to_owned(), NodeId::default()), + ); + } + } + let res = Resource::get_resource(expr.prefix, expr.suffix); let matches = res .as_ref() @@ -333,15 +502,7 @@ impl HatPubSubTrait for HatCode { let mres = mres.upgrade().unwrap(); for (sid, context) in &mres.session_ctxs { - if context.subs.is_some() - && match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => { - source_type == WhatAmI::Client - || context.face.whatami == WhatAmI::Client - } - } - { + if context.subs.is_some() && context.face.whatami == WhatAmI::Client { route.entry(*sid).or_insert_with(|| { let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); (context.face.clone(), key_expr.to_owned(), NodeId::default()) @@ -365,4 +526,62 @@ impl HatPubSubTrait for HatCode { fn get_data_routes_entries(&self, _tables: &Tables) -> RoutesIndexes { get_routes_entries() } + + fn get_matching_subscriptions( + &self, + tables: &Tables, + key_expr: &KeyExpr<'_>, + ) -> HashMap> { + let mut 
matching_subscriptions = HashMap::new(); + if key_expr.ends_with('/') { + return matching_subscriptions; + } + tracing::trace!("get_matching_subscriptions({})", key_expr,); + + for face in tables + .faces + .values() + .filter(|f| f.whatami != WhatAmI::Client) + { + if face.local_interests.values().any(|interest| { + interest.finalized + && interest.options.subscribers() + && interest + .res + .as_ref() + .map(|res| { + KeyExpr::try_from(res.expr()) + .map(|intres| intres.includes(key_expr)) + .unwrap_or(false) + }) + .unwrap_or(true) + }) && face_hat!(face).remote_subs.values().any(|sub| { + KeyExpr::try_from(sub.expr()) + .map(|subres| subres.intersects(key_expr)) + .unwrap_or(false) + }) { + matching_subscriptions.insert(face.id, face.clone()); + } + } + + let res = Resource::get_resource(&tables.root_res, key_expr); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, key_expr))); + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + + for (sid, context) in &mres.session_ctxs { + if context.subs.is_some() && context.face.whatami == WhatAmI::Client { + matching_subscriptions + .entry(*sid) + .or_insert_with(|| context.face.clone()); + } + } + } + matching_subscriptions + } } diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index c915d788a9..749c03d5f8 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -27,9 +27,12 @@ use zenoh_protocol::{ }, WhatAmI, WireExpr, }, - network::declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareQueryable, QueryableId, UndeclareQueryable, + network::{ + declare::{ + common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, + DeclareBody, DeclareQueryable, QueryableId, UndeclareQueryable, + }, + interest::{InterestId, InterestMode}, }, }; use zenoh_sync::get_mut_unchecked; @@ -99,6 +102,7 @@ fn propagate_simple_queryable( .local_qabls .insert(res.clone(), (id, info)); let key_expr = Resource::decl_key(res, &mut dst_face); + println!("Decled key = {key_expr:?}"); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { interest_id: None, @@ -127,17 +131,11 @@ fn register_client_queryable( // Register queryable { let res = get_mut_unchecked(res); - get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: None, - qabl: None, - in_interceptor_cache: None, - e_interceptor_cache: None, - }) - })) + get_mut_unchecked( + res.session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))), + ) .qabl = Some(*qabl_info); } face_hat_mut!(face).remote_qabls.insert(id, res.clone()); @@ -260,6 +258,27 @@ lazy_static::lazy_static! 
{ } impl HatQueriesTrait for HatCode { + fn declare_qabl_interest( + &self, + _tables: &mut Tables, + _face: &mut Arc, + _id: InterestId, + _res: Option<&mut Arc>, + _mode: InterestMode, + _aggregate: bool, + ) { + // ignore + } + + fn undeclare_qabl_interest( + &self, + _tables: &mut Tables, + _face: &mut Arc, + _id: InterestId, + ) { + // ignore + } + fn declare_queryable( &self, tables: &mut Tables, @@ -326,6 +345,16 @@ impl HatQueriesTrait for HatCode { return EMPTY_ROUTE.clone(); } }; + + if let Some(face) = tables.faces.values().find(|f| f.whatami != WhatAmI::Client) { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.push(QueryTargetQabl { + direction: (face.clone(), key_expr.to_owned(), NodeId::default()), + complete: 0, + distance: f64::MAX, + }); + } + let res = Resource::get_resource(expr.prefix, expr.suffix); let matches = res .as_ref() diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index e76f53a0dd..bb5aec4db1 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -29,6 +29,7 @@ use zenoh_protocol::{ common::ZExtBody, network::{ declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, + interest::InterestId, oam::id::OAM_LINKSTATE, Oam, }, @@ -480,8 +481,10 @@ impl HatContext { struct HatFace { link_id: usize, next_id: AtomicU32, // @TODO: manage rollover and uniqueness + remote_sub_interests: HashMap>, bool)>, local_subs: HashMap, SubscriberId>, remote_subs: HashMap>, + remote_qabl_interests: HashMap>>, local_qabls: HashMap, (QueryableId, QueryableInfoType)>, remote_qabls: HashMap>, } @@ -491,8 +494,10 @@ impl HatFace { Self { link_id: 0, next_id: AtomicU32::new(0), + remote_sub_interests: HashMap::new(), local_subs: HashMap::new(), remote_subs: HashMap::new(), + remote_qabl_interests: HashMap::new(), local_qabls: HashMap::new(), remote_qabls: HashMap::new(), } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index e5f7da81f7..135f899656 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -20,9 +20,12 @@ use std::{ use petgraph::graph::NodeIndex; use zenoh_protocol::{ core::{key_expr::OwnedKeyExpr, Reliability, WhatAmI, ZenohId}, - network::declare::{ - common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, SubscriberId, UndeclareSubscriber, + network::{ + declare::{ + common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, + DeclareSubscriber, SubscriberId, UndeclareSubscriber, + }, + interest::{InterestId, InterestMode}, }, }; use zenoh_sync::get_mut_unchecked; @@ -31,16 +34,19 @@ use super::{ face_hat, face_hat_mut, get_peer, get_routes_entries, hat, hat_mut, network::Network, res_hat, res_hat_mut, HatCode, HatContext, HatFace, HatTables, }; -use crate::net::routing::{ - dispatcher::{ - face::FaceState, - pubsub::*, - resource::{NodeId, Resource, SessionContext}, - tables::{Route, RoutingExpr, Tables}, +use crate::{ + key_expr::KeyExpr, + net::routing::{ + dispatcher::{ + face::FaceState, + pubsub::*, + resource::{NodeId, Resource, SessionContext}, + tables::{Route, RoutingExpr, Tables}, + }, + hat::{CurrentFutureTrait, HatPubSubTrait, Sources}, + router::RoutesIndexes, + RoutingContext, PREFIX_LIVELINESS, }, - hat::{HatPubSubTrait, Sources}, - router::RoutesIndexes, - 
RoutingContext, PREFIX_LIVELINESS, }; #[inline] @@ -96,23 +102,59 @@ fn propagate_simple_subscription_to( && !face_hat!(dst_face).local_subs.contains_key(res) && dst_face.whatami == WhatAmI::Client { - let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); - let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr: key_expr, - ext_info: *sub_info, - }), - }, - res.expr(), - )); + if dst_face.whatami != WhatAmI::Client { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + )); + } else { + let matching_interests = face_hat!(dst_face) + .remote_sub_interests + .values() + .filter(|si| si.0.as_ref().map(|si| si.matches(res)).unwrap_or(true)) + .cloned() + .collect::>, bool)>>(); + + for (int_res, aggregate) in matching_interests { + let res = if aggregate { + int_res.as_ref().unwrap_or(res) + } else { + res + }; + if !face_hat!(dst_face).local_subs.contains_key(res) { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + )); + } + } + } } } @@ -220,18 +262,11 @@ fn register_client_subscription( } } None => { - res.session_ctxs.insert( - face.id, - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: Some(*sub_info), - qabl: None, - in_interceptor_cache: None, - e_interceptor_cache: None, - }), - ); + let ctx = res + .session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))); + get_mut_unchecked(ctx).subs = Some(*sub_info); } } } @@ -273,6 +308,13 @@ fn client_subs(res: &Arc) -> Vec> { .collect() } +#[inline] +fn remote_client_subs(res: &Arc, face: &Arc) -> bool { + res.session_ctxs + .values() + .any(|ctx| ctx.face.id != face.id && ctx.subs.is_some()) +} + #[inline] fn send_forget_sourced_subscription_to_net_childs( tables: &Tables, @@ -313,8 +355,8 @@ fn send_forget_sourced_subscription_to_net_childs( } fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { - for face in tables.faces.values_mut() { - if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { + for mut face in tables.faces.values().cloned() { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { interest_id: None, @@ -329,6 +371,35 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc 
res.expr(), )); } + for res in face_hat!(face) + .local_subs + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_client_subs(&m, &face) || remote_peer_subs(tables, &m)) + }) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } } } @@ -417,8 +488,9 @@ pub(super) fn undeclare_client_subscription( if client_subs.is_empty() { undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); } + if client_subs.len() == 1 && !peer_subs { - let face = &mut client_subs[0]; + let mut face = &mut client_subs[0]; if !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( @@ -435,6 +507,35 @@ pub(super) fn undeclare_client_subscription( res.expr(), )); } + for res in face_hat!(face) + .local_subs + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_client_subs(&m, face) || remote_peer_subs(tables, &m)) + }) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } } } } @@ -453,32 +554,8 @@ fn forget_client_subscription( } } -pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers - }; - - if face.whatami == WhatAmI::Client { - for sub in &hat!(tables).peer_subs { - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face).local_subs.insert(sub.clone(), id); - let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); - } - } +pub(super) fn pubsub_new_face(_tables: &mut Tables, _face: &mut Arc) { + // Nothing to do } pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId) { @@ -534,40 +611,129 @@ pub(super) fn pubsub_tree_change(tables: &mut Tables, new_childs: &[Vec, -) { - if net.trees.len() > source as usize { - for sub in subs { - if let Some(sub_idx) = net.get_idx(sub) { - if net.trees[source as usize].directions.len() > sub_idx.index() { - if let Some(direction) = net.trees[source as usize].directions[sub_idx.index()] - { - if net.graph.contains_node(direction) { - if let Some(face) = tables.get_face(&net.graph[direction].zid) { - route.entry(face.id).or_insert_with(|| { - let key_expr = - Resource::get_best_key(expr.prefix, 
expr.suffix, face.id); - (face.clone(), key_expr.to_owned(), source) - }); - } +impl HatPubSubTrait for HatCode { + fn declare_sub_interest( + &self, + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, + ) { + if mode.current() && face.whatami == WhatAmI::Client { + let interest_id = mode.future().then_some(id); + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers + }; + if let Some(res) = res.as_ref() { + if aggregate { + if hat!(tables).peer_subs.iter().any(|sub| { + sub.context.is_some() + && sub.matches(res) + && (remote_client_subs(sub, face) || remote_peer_subs(tables, sub)) + }) { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert((*res).clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + res.expr(), + )); + } + } else { + for sub in &hat!(tables).peer_subs { + if sub.context.is_some() + && sub.matches(res) + && (remote_client_subs(sub, face) || remote_peer_subs(tables, sub)) + { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); } } } + } else { + for sub in &hat!(tables).peer_subs { + if sub.context.is_some() + && (remote_client_subs(sub, face) || remote_peer_subs(tables, sub)) + { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); + } + } } } - } else { - tracing::trace!("Tree for node sid:{} not yet ready", source); + if mode.future() { + face_hat_mut!(face) + .remote_sub_interests + .insert(id, (res.cloned(), aggregate)); + } + } + + fn undeclare_sub_interest( + &self, + _tables: &mut Tables, + face: &mut Arc, + id: InterestId, + ) { + face_hat_mut!(face).remote_sub_interests.remove(&id); } -} -impl HatPubSubTrait for HatCode { fn declare_subscription( &self, tables: &mut Tables, @@ -644,6 +810,43 @@ impl HatPubSubTrait for HatCode { source: NodeId, source_type: WhatAmI, ) -> Arc { + #[inline] + fn insert_faces_for_subs( + route: &mut Route, + expr: &RoutingExpr, + tables: &Tables, + net: &Network, + source: NodeId, + subs: &HashSet, + ) { + if net.trees.len() > source as usize { + for sub in subs { + if let Some(sub_idx) = 
net.get_idx(sub) { + if net.trees[source as usize].directions.len() > sub_idx.index() { + if let Some(direction) = + net.trees[source as usize].directions[sub_idx.index()] + { + if net.graph.contains_node(direction) { + if let Some(face) = tables.get_face(&net.graph[direction].zid) { + route.entry(face.id).or_insert_with(|| { + let key_expr = Resource::get_best_key( + expr.prefix, + expr.suffix, + face.id, + ); + (face.clone(), key_expr.to_owned(), source) + }); + } + } + } + } + } + } + } else { + tracing::trace!("Tree for node sid:{} not yet ready", source); + } + } + let mut route = HashMap::new(); let key_expr = expr.full_expr(); if key_expr.ends_with('/') { @@ -688,13 +891,7 @@ impl HatPubSubTrait for HatCode { for (sid, context) in &mres.session_ctxs { if context.subs.is_some() - && match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => { - source_type == WhatAmI::Client - || context.face.whatami == WhatAmI::Client - } - } + && (source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client) { route.entry(*sid).or_insert_with(|| { let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); @@ -719,4 +916,72 @@ impl HatPubSubTrait for HatCode { fn get_data_routes_entries(&self, tables: &Tables) -> RoutesIndexes { get_routes_entries(tables) } + + fn get_matching_subscriptions( + &self, + tables: &Tables, + key_expr: &KeyExpr<'_>, + ) -> HashMap> { + #[inline] + fn insert_faces_for_subs( + route: &mut HashMap>, + tables: &Tables, + net: &Network, + source: usize, + subs: &HashSet, + ) { + if net.trees.len() > source { + for sub in subs { + if let Some(sub_idx) = net.get_idx(sub) { + if net.trees[source].directions.len() > sub_idx.index() { + if let Some(direction) = net.trees[source].directions[sub_idx.index()] { + if net.graph.contains_node(direction) { + if let Some(face) = tables.get_face(&net.graph[direction].zid) { + route.entry(face.id).or_insert_with(|| face.clone()); + } + } + } + } + } + } + } else { + tracing::trace!("Tree for node sid:{} not yet ready", source); + } + } + + let mut matching_subscriptions = HashMap::new(); + if key_expr.ends_with('/') { + return matching_subscriptions; + } + tracing::trace!("get_matching_subscriptions({})", key_expr,); + + let res = Resource::get_resource(&tables.root_res, key_expr); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, key_expr))); + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + + let net = hat!(tables).peers_net.as_ref().unwrap(); + insert_faces_for_subs( + &mut matching_subscriptions, + tables, + net, + net.idx.index(), + &res_hat!(mres).peer_subs, + ); + + for (sid, context) in &mres.session_ctxs { + if context.subs.is_some() { + matching_subscriptions + .entry(*sid) + .or_insert_with(|| context.face.clone()); + } + } + } + matching_subscriptions + } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index bed683f717..3d9babbd5d 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -28,9 +28,12 @@ use zenoh_protocol::{ }, WhatAmI, WireExpr, ZenohId, }, - network::declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareQueryable, QueryableId, UndeclareQueryable, + network::{ + declare::{ + common::ext::WireExprType, ext, 
queryable::ext::QueryableInfoType, Declare, + DeclareBody, DeclareQueryable, QueryableId, UndeclareQueryable, + }, + interest::{InterestId, InterestMode}, }, }; use zenoh_sync::get_mut_unchecked; @@ -46,7 +49,7 @@ use crate::net::routing::{ resource::{NodeId, Resource, SessionContext}, tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr, Tables}, }, - hat::{HatQueriesTrait, Sources}, + hat::{CurrentFutureTrait, HatQueriesTrait, Sources}, router::RoutesIndexes, RoutingContext, PREFIX_LIVELINESS, }; @@ -170,6 +173,10 @@ fn propagate_simple_queryable( if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) && (current.is_none() || current.unwrap().1 != info) && dst_face.whatami == WhatAmI::Client + && face_hat!(dst_face) + .remote_qabl_interests + .values() + .any(|si| si.as_ref().map(|si| si.matches(res)).unwrap_or(true)) { let id = current .map(|c| c.0) @@ -279,17 +286,11 @@ fn register_client_queryable( // Register queryable { let res = get_mut_unchecked(res); - get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: None, - qabl: None, - in_interceptor_cache: None, - e_interceptor_cache: None, - }) - })) + get_mut_unchecked( + res.session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))), + ) .qabl = Some(*qabl_info); } face_hat_mut!(face).remote_qabls.insert(id, res.clone()); @@ -331,6 +332,13 @@ fn client_qabls(res: &Arc) -> Vec> { .collect() } +#[inline] +fn remote_client_qabls(res: &Arc, face: &Arc) -> bool { + res.session_ctxs + .values() + .any(|ctx| ctx.face.id != face.id && ctx.qabl.is_some()) +} + #[inline] fn send_forget_sourced_queryable_to_net_childs( tables: &Tables, @@ -371,8 +379,8 @@ fn send_forget_sourced_queryable_to_net_childs( } fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { - for face in tables.faces.values_mut() { - if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { + for mut face in tables.faces.values().cloned() { + if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { interest_id: None, @@ -387,6 +395,35 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_client_qabls(&m, &face) || remote_peer_qabls(tables, &m)) + }) + }) { + if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } } } @@ -485,7 +522,7 @@ pub(super) fn undeclare_client_queryable( } if client_qabls.len() == 1 && !peer_qabls { - let face = &mut client_qabls[0]; + let mut face = &mut client_qabls[0]; if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -501,6 +538,35 @@ pub(super) fn undeclare_client_queryable( res.expr(), )); } + for res in face_hat!(face) + .local_qabls + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && 
(remote_client_qabls(&m, face) || remote_peer_qabls(tables, &m)) + }) + }) { + if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } } } } @@ -518,33 +584,8 @@ fn forget_client_queryable( } } -pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { - if face.whatami == WhatAmI::Client { - for qabl in &hat!(tables).peer_qabls { - if qabl.context.is_some() { - let info = local_qabl_info(tables, qabl, face); - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face) - .local_qabls - .insert(qabl.clone(), (id, info)); - let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); - } - } - } +pub(super) fn queries_new_face(_tables: &mut Tables, _face: &mut Arc) { + // Nothing to do } pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId) { @@ -644,6 +685,134 @@ lazy_static::lazy_static! { } impl HatQueriesTrait for HatCode { + fn declare_qabl_interest( + &self, + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, + ) { + if mode.current() && face.whatami == WhatAmI::Client { + let interest_id = mode.future().then_some(id); + if let Some(res) = res.as_ref() { + if aggregate { + if hat!(tables).peer_qabls.iter().any(|qabl| { + qabl.context.is_some() + && qabl.matches(res) + && (remote_client_qabls(qabl, face) || remote_peer_qabls(tables, qabl)) + }) { + let info = local_qabl_info(tables, res, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert((*res).clone(), (id, info)); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr, + ext_info: info, + }), + }, + res.expr(), + )); + } + } else { + for qabl in hat!(tables).peer_qabls.iter() { + if qabl.context.is_some() + && qabl.matches(res) + && (remote_client_qabls(qabl, face) || remote_peer_qabls(tables, qabl)) + { + let info = local_qabl_info(tables, qabl, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); + id + } else { + 0 + }; + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); + } + } + } + } else { + for qabl in hat!(tables).peer_qabls.iter() { + if 
qabl.context.is_some() + && (remote_client_qabls(qabl, face) || remote_peer_qabls(tables, qabl)) + { + let info = local_qabl_info(tables, qabl, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); + id + } else { + 0 + }; + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); + } + } + } + } + if mode.future() { + face_hat_mut!(face) + .remote_qabl_interests + .insert(id, res.cloned()); + } + } + + fn undeclare_qabl_interest( + &self, + _tables: &mut Tables, + face: &mut Arc, + id: InterestId, + ) { + face_hat_mut!(face).remote_qabl_interests.remove(&id); + } + fn declare_queryable( &self, tables: &mut Tables, @@ -765,10 +934,7 @@ impl HatQueriesTrait for HatCode { ); for (sid, context) in &mres.session_ctxs { - if match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client, - } { + if source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client { let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); if let Some(qabl_info) = context.qabl.as_ref() { route.push(QueryTargetQabl { diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index 5eb812df71..3a7844ea44 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -17,7 +17,7 @@ //! This module is intended for Zenoh's internal use. //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) -use std::{any::Any, sync::Arc}; +use std::{any::Any, collections::HashMap, sync::Arc}; use zenoh_buffers::ZBuf; use zenoh_config::{unwrap_or_default, Config, WhatAmI, ZenohId}; @@ -28,6 +28,7 @@ use zenoh_protocol::{ queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, QueryableId, SubscriberId, }, + interest::{InterestId, InterestMode}, Oam, }, }; @@ -41,7 +42,7 @@ use super::{ }, router::RoutesIndexes, }; -use crate::net::runtime::Runtime; +use crate::{key_expr::KeyExpr, runtime::Runtime}; mod client; mod linkstate_peer; @@ -135,6 +136,22 @@ pub(crate) trait HatBaseTrait { } pub(crate) trait HatPubSubTrait { + #[allow(clippy::too_many_arguments)] // TODO refactor + fn declare_sub_interest( + &self, + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, + ); + fn undeclare_sub_interest( + &self, + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + ); fn declare_subscription( &self, tables: &mut Tables, @@ -164,9 +181,31 @@ pub(crate) trait HatPubSubTrait { ) -> Arc; fn get_data_routes_entries(&self, tables: &Tables) -> RoutesIndexes; + + fn get_matching_subscriptions( + &self, + tables: &Tables, + key_expr: &KeyExpr<'_>, + ) -> HashMap>; } pub(crate) trait HatQueriesTrait { + #[allow(clippy::too_many_arguments)] // TODO refactor + fn declare_qabl_interest( + &self, + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, + ); + fn undeclare_qabl_interest( + &self, + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + ); fn declare_queryable( &self, tables: &mut Tables, @@ -219,3 +258,20 @@ pub(crate) fn new_hat(whatami: WhatAmI, config: &Config) -> Box Box::new(router::HatCode {}), } } + +trait CurrentFutureTrait { + fn future(&self) -> bool; + fn current(&self) -> bool; +} + +impl CurrentFutureTrait for InterestMode { + #[inline] + fn future(&self) -> bool { + self == &InterestMode::Future || self == &InterestMode::CurrentFuture + } + + #[inline] + fn current(&self) -> bool { + self == &InterestMode::Current || self == &InterestMode::CurrentFuture + } +} diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index 530c181335..5ac77a3135 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -28,6 +28,7 @@ use zenoh_protocol::{ common::ZExtBody, network::{ declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, + interest::InterestId, oam::id::OAM_LINKSTATE, Oam, }, @@ -357,8 +358,10 @@ impl HatContext { struct HatFace { next_id: AtomicU32, // @TODO: manage rollover and uniqueness + remote_sub_interests: HashMap>, bool)>, local_subs: HashMap, SubscriberId>, remote_subs: HashMap>, + remote_qabl_interests: HashMap>>, local_qabls: HashMap, (QueryableId, QueryableInfoType)>, remote_qabls: HashMap>, } @@ -367,8 +370,10 @@ impl HatFace { fn new() -> Self { Self { next_id: AtomicU32::new(0), + remote_sub_interests: HashMap::new(), local_subs: HashMap::new(), remote_subs: HashMap::new(), + remote_qabl_interests: HashMap::new(), local_qabls: HashMap::new(), remote_qabls: HashMap::new(), } diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index e7cf0c5e5d..31172e2804 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -19,23 +19,29 @@ use std::{ use 
zenoh_protocol::{ core::{key_expr::OwnedKeyExpr, Reliability, WhatAmI}, - network::declare::{ - common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, SubscriberId, UndeclareSubscriber, + network::{ + declare::{ + common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, + DeclareSubscriber, SubscriberId, UndeclareSubscriber, + }, + interest::{InterestId, InterestMode}, }, }; use zenoh_sync::get_mut_unchecked; use super::{face_hat, face_hat_mut, get_routes_entries, HatCode, HatFace}; -use crate::net::routing::{ - dispatcher::{ - face::FaceState, - resource::{NodeId, Resource, SessionContext}, - tables::{Route, RoutingExpr, Tables}, +use crate::{ + key_expr::KeyExpr, + net::routing::{ + dispatcher::{ + face::FaceState, + resource::{NodeId, Resource, SessionContext}, + tables::{Route, RoutingExpr, Tables}, + }, + hat::{CurrentFutureTrait, HatPubSubTrait, Sources}, + router::RoutesIndexes, + RoutingContext, PREFIX_LIVELINESS, }, - hat::{HatPubSubTrait, Sources}, - router::RoutesIndexes, - RoutingContext, PREFIX_LIVELINESS, }; #[inline] @@ -51,23 +57,59 @@ fn propagate_simple_subscription_to( && !face_hat!(dst_face).local_subs.contains_key(res) && (src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client) { - let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); - let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr: key_expr, - ext_info: *sub_info, - }), - }, - res.expr(), - )); + if dst_face.whatami != WhatAmI::Client { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + )); + } else { + let matching_interests = face_hat!(dst_face) + .remote_sub_interests + .values() + .filter(|si| si.0.as_ref().map(|si| si.matches(res)).unwrap_or(true)) + .cloned() + .collect::>, bool)>>(); + + for (int_res, aggregate) in matching_interests { + let res = if aggregate { + int_res.as_ref().unwrap_or(res) + } else { + res + }; + if !face_hat!(dst_face).local_subs.contains_key(res) { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + )); + } + } + } } } @@ -104,18 +146,11 @@ fn register_client_subscription( } } None => { - res.session_ctxs.insert( - face.id, - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: 
Some(*sub_info), - qabl: None, - in_interceptor_cache: None, - e_interceptor_cache: None, - }), - ); + let ctx = res + .session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))); + get_mut_unchecked(ctx).subs = Some(*sub_info); } } } @@ -169,9 +204,16 @@ fn client_subs(res: &Arc) -> Vec> { .collect() } +#[inline] +fn remote_client_subs(res: &Arc, face: &Arc) -> bool { + res.session_ctxs + .values() + .any(|ctx| ctx.face.id != face.id && ctx.subs.is_some()) +} + fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { - for face in tables.faces.values_mut() { - if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { + for mut face in tables.faces.values().cloned() { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { interest_id: None, @@ -186,6 +228,33 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc res.expr(), )); } + for res in face_hat!(face) + .local_subs + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade() + .is_some_and(|m| m.context.is_some() && remote_client_subs(&m, &face)) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } } } @@ -203,8 +272,9 @@ pub(super) fn undeclare_client_subscription( if client_subs.is_empty() { propagate_forget_simple_subscription(tables, res); } + if client_subs.len() == 1 { - let face = &mut client_subs[0]; + let mut face = &mut client_subs[0]; if !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( @@ -221,6 +291,33 @@ pub(super) fn undeclare_client_subscription( res.expr(), )); } + for res in face_hat!(face) + .local_subs + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade() + .is_some_and(|m| m.context.is_some() && remote_client_subs(&m, face)) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } } } } @@ -240,22 +337,168 @@ fn forget_client_subscription( } pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers - }; - for src_face in tables - .faces - .values() - .cloned() - .collect::>>() - { - for sub in face_hat!(src_face).remote_subs.values() { - propagate_simple_subscription_to(tables, face, sub, &sub_info, &mut src_face.clone()); + if face.whatami != WhatAmI::Client { + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers + }; + for src_face in tables + 
.faces + .values() + .cloned() + .collect::>>() + { + for sub in face_hat!(src_face).remote_subs.values() { + propagate_simple_subscription_to( + tables, + face, + sub, + &sub_info, + &mut src_face.clone(), + ); + } } } } impl HatPubSubTrait for HatCode { + fn declare_sub_interest( + &self, + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, + ) { + if mode.current() && face.whatami == WhatAmI::Client { + let interest_id = mode.future().then_some(id); + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers + }; + if let Some(res) = res.as_ref() { + if aggregate { + if tables.faces.values().any(|src_face| { + src_face.id != face.id + && face_hat!(src_face) + .remote_subs + .values() + .any(|sub| sub.context.is_some() && sub.matches(res)) + }) { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert((*res).clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + res.expr(), + )); + } + } else { + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + if src_face.id != face.id { + for sub in face_hat!(src_face).remote_subs.values() { + if sub.context.is_some() && sub.matches(res) { + let id = if mode.future() { + let id = + face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber( + DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }, + ), + }, + sub.expr(), + )); + } + } + } + } + } + } else { + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + if src_face.id != face.id { + for sub in face_hat!(src_face).remote_subs.values() { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); + } + } + } + } + } + if mode.future() { + face_hat_mut!(face) + .remote_sub_interests + .insert(id, (res.cloned(), aggregate)); + } + } + + fn undeclare_sub_interest( + &self, + _tables: &mut Tables, + face: &mut Arc, + id: InterestId, + ) { + face_hat_mut!(face).remote_sub_interests.remove(&id); + } + fn declare_subscription( &self, tables: &mut Tables, @@ -334,13 +577,7 @@ impl HatPubSubTrait for HatCode { for (sid, context) in &mres.session_ctxs { if context.subs.is_some() - && match tables.whatami { - WhatAmI::Router => context.face.whatami != 
WhatAmI::Router, - _ => { - source_type == WhatAmI::Client - || context.face.whatami == WhatAmI::Client - } - } + && (source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client) { route.entry(*sid).or_insert_with(|| { let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); @@ -365,4 +602,35 @@ impl HatPubSubTrait for HatCode { fn get_data_routes_entries(&self, _tables: &Tables) -> RoutesIndexes { get_routes_entries() } + + fn get_matching_subscriptions( + &self, + tables: &Tables, + key_expr: &KeyExpr<'_>, + ) -> HashMap> { + let mut matching_subscriptions = HashMap::new(); + if key_expr.ends_with('/') { + return matching_subscriptions; + } + tracing::trace!("get_matching_subscriptions({})", key_expr,); + let res = Resource::get_resource(&tables.root_res, key_expr); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, key_expr))); + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + + for (sid, context) in &mres.session_ctxs { + if context.subs.is_some() { + matching_subscriptions + .entry(*sid) + .or_insert_with(|| context.face.clone()); + } + } + } + matching_subscriptions + } } diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index f0de12d7b9..1801f66c84 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -27,9 +27,12 @@ use zenoh_protocol::{ }, WhatAmI, WireExpr, }, - network::declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareQueryable, QueryableId, UndeclareQueryable, + network::{ + declare::{ + common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, + DeclareBody, DeclareQueryable, QueryableId, UndeclareQueryable, + }, + interest::{InterestId, InterestMode}, }, }; use zenoh_sync::get_mut_unchecked; @@ -41,7 +44,7 @@ use crate::net::routing::{ resource::{NodeId, Resource, SessionContext}, tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr, Tables}, }, - hat::{HatQueriesTrait, Sources}, + hat::{CurrentFutureTrait, HatQueriesTrait, Sources}, router::RoutesIndexes, RoutingContext, PREFIX_LIVELINESS, }; @@ -77,43 +80,62 @@ fn local_qabl_info( .unwrap_or(QueryableInfoType::DEFAULT) } +#[inline] +fn propagate_simple_queryable_to( + tables: &mut Tables, + dst_face: &mut Arc, + res: &Arc, + src_face: &Option<&mut Arc>, +) { + let info = local_qabl_info(tables, res, dst_face); + let current = face_hat!(dst_face).local_qabls.get(res); + if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) + && (current.is_none() || current.unwrap().1 != info) + && (dst_face.whatami != WhatAmI::Client + || face_hat!(dst_face) + .remote_qabl_interests + .values() + .any(|si| si.as_ref().map(|si| si.matches(res)).unwrap_or(true))) + && (src_face.is_none() + || src_face.as_ref().unwrap().whatami == WhatAmI::Client + || dst_face.whatami == WhatAmI::Client) + { + let id = current + .map(|c| c.0) + .unwrap_or(face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst)); + face_hat_mut!(dst_face) + .local_qabls + .insert(res.clone(), (id, info)); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: 
DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + res.expr(), + )); + } +} + fn propagate_simple_queryable( tables: &mut Tables, res: &Arc, src_face: Option<&mut Arc>, ) { - let faces = tables.faces.values().cloned(); + let faces = tables + .faces + .values() + .cloned() + .collect::>>(); for mut dst_face in faces { - let info = local_qabl_info(tables, res, &dst_face); - let current = face_hat!(dst_face).local_qabls.get(res); - if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) - && (current.is_none() || current.unwrap().1 != info) - && (src_face.is_none() - || src_face.as_ref().unwrap().whatami == WhatAmI::Client - || dst_face.whatami == WhatAmI::Client) - { - let id = current - .map(|c| c.0) - .unwrap_or(face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst)); - face_hat_mut!(&mut dst_face) - .local_qabls - .insert(res.clone(), (id, info)); - let key_expr = Resource::decl_key(res, &mut dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr: key_expr, - ext_info: info, - }), - }, - res.expr(), - )); - } + propagate_simple_queryable_to(tables, &mut dst_face, res, &src_face); } } @@ -127,17 +149,11 @@ fn register_client_queryable( // Register queryable { let res = get_mut_unchecked(res); - get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: None, - qabl: None, - in_interceptor_cache: None, - e_interceptor_cache: None, - }) - })) + get_mut_unchecked( + res.session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))), + ) .qabl = Some(*qabl_info); } face_hat_mut!(face).remote_qabls.insert(id, res.clone()); @@ -168,6 +184,13 @@ fn client_qabls(res: &Arc) -> Vec> { .collect() } +#[inline] +fn remote_client_qabls(res: &Arc, face: &Arc) -> bool { + res.session_ctxs + .values() + .any(|ctx| ctx.face.id != face.id && ctx.qabl.is_some()) +} + fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { for face in tables.faces.values_mut() { if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { @@ -185,6 +208,33 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade() + .is_some_and(|m| m.context.is_some() && remote_client_qabls(&m, face)) + }) { + if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } } } @@ -209,7 +259,7 @@ pub(super) fn undeclare_client_queryable( propagate_simple_queryable(tables, res, None); } if client_qabls.len() == 1 { - let face = &mut client_qabls[0]; + let mut face = &mut client_qabls[0]; if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -225,6 +275,33 @@ pub(super) fn undeclare_client_queryable( res.expr(), )); } + for res in face_hat!(face) + .local_qabls + .keys() + 
.cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade() + .is_some_and(|m| m.context.is_some() && (remote_client_qabls(&m, face))) + }) { + if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } } } } @@ -242,15 +319,17 @@ fn forget_client_queryable( } } -pub(super) fn queries_new_face(tables: &mut Tables, _face: &mut Arc) { - for face in tables - .faces - .values() - .cloned() - .collect::>>() - { - for qabl in face_hat!(face).remote_qabls.values() { - propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); +pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { + if face.whatami != WhatAmI::Client { + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for qabl in face_hat!(src_face).remote_qabls.values() { + propagate_simple_queryable_to(tables, face, qabl, &Some(&mut src_face.clone())); + } } } } @@ -260,6 +339,150 @@ lazy_static::lazy_static! { } impl HatQueriesTrait for HatCode { + fn declare_qabl_interest( + &self, + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, + ) { + if mode.current() && face.whatami == WhatAmI::Client { + let interest_id = mode.future().then_some(id); + if let Some(res) = res.as_ref() { + if aggregate { + if tables.faces.values().any(|src_face| { + src_face.id != face.id + && face_hat!(src_face) + .remote_qabls + .values() + .any(|qabl| qabl.context.is_some() && qabl.matches(res)) + }) { + let info = local_qabl_info(tables, res, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert((*res).clone(), (id, info)); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr, + ext_info: info, + }), + }, + res.expr(), + )); + } + } else { + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + if src_face.id != face.id { + for qabl in face_hat!(src_face).remote_qabls.values() { + if qabl.context.is_some() && qabl.matches(res) { + let info = local_qabl_info(tables, qabl, face); + let id = if mode.future() { + let id = + face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); + id + } else { + 0 + }; + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); + } + } + } + } + } + } else { + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + if src_face.id != face.id { + for qabl in face_hat!(src_face).remote_qabls.values() { + if qabl.context.is_some() { + let info = 
local_qabl_info(tables, qabl, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); + id + } else { + 0 + }; + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); + } + } + } + } + } + } + if mode.future() { + face_hat_mut!(face) + .remote_qabl_interests + .insert(id, res.cloned()); + } + } + + fn undeclare_qabl_interest( + &self, + _tables: &mut Tables, + face: &mut Arc, + id: InterestId, + ) { + face_hat_mut!(face).remote_qabl_interests.remove(&id); + } + fn declare_queryable( &self, tables: &mut Tables, @@ -337,10 +560,7 @@ impl HatQueriesTrait for HatCode { let mres = mres.upgrade().unwrap(); let complete = DEFAULT_INCLUDER.includes(mres.expr().as_bytes(), key_expr.as_bytes()); for (sid, context) in &mres.session_ctxs { - if match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client, - } { + if source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client { let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); if let Some(qabl_info) = context.qabl.as_ref() { route.push(QueryTargetQabl { diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index f573acee43..54b132f665 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -30,6 +30,7 @@ use zenoh_protocol::{ common::ZExtBody, network::{ declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, + interest::InterestId, oam::id::OAM_LINKSTATE, Oam, }, @@ -785,8 +786,10 @@ impl HatContext { struct HatFace { link_id: usize, next_id: AtomicU32, // @TODO: manage rollover and uniqueness + remote_sub_interests: HashMap>, bool)>, local_subs: HashMap, SubscriberId>, remote_subs: HashMap>, + remote_qabl_interests: HashMap>>, local_qabls: HashMap, (QueryableId, QueryableInfoType)>, remote_qabls: HashMap>, } @@ -796,8 +799,10 @@ impl HatFace { Self { link_id: 0, next_id: AtomicU32::new(0), + remote_sub_interests: HashMap::new(), local_subs: HashMap::new(), remote_subs: HashMap::new(), + remote_qabl_interests: HashMap::new(), local_qabls: HashMap::new(), remote_qabls: HashMap::new(), } diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index 14726ac970..3bfb0fdd6f 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -20,9 +20,12 @@ use std::{ use petgraph::graph::NodeIndex; use zenoh_protocol::{ core::{key_expr::OwnedKeyExpr, Reliability, WhatAmI, ZenohId}, - network::declare::{ - common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, SubscriberId, UndeclareSubscriber, + network::{ + declare::{ + common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, + DeclareSubscriber, SubscriberId, UndeclareSubscriber, + }, + interest::{InterestId, InterestMode}, }, }; use zenoh_sync::get_mut_unchecked; @@ -31,16 +34,19 @@ use super::{ face_hat, face_hat_mut, get_peer, get_router, get_routes_entries, hat, hat_mut, 
network::Network, res_hat, res_hat_mut, HatCode, HatContext, HatFace, HatTables, }; -use crate::net::routing::{ - dispatcher::{ - face::FaceState, - pubsub::*, - resource::{NodeId, Resource, SessionContext}, - tables::{Route, RoutingExpr, Tables}, +use crate::{ + key_expr::KeyExpr, + net::routing::{ + dispatcher::{ + face::FaceState, + pubsub::*, + resource::{NodeId, Resource, SessionContext}, + tables::{Route, RoutingExpr, Tables}, + }, + hat::{CurrentFutureTrait, HatPubSubTrait, Sources}, + router::RoutesIndexes, + RoutingContext, PREFIX_LIVELINESS, }, - hat::{HatPubSubTrait, Sources}, - router::RoutesIndexes, - RoutingContext, PREFIX_LIVELINESS, }; #[inline] @@ -105,23 +111,59 @@ fn propagate_simple_subscription_to( || hat!(tables).failover_brokering(src_face.zid, dst_face.zid)) } { - let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); - let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr: key_expr, - ext_info: *sub_info, - }), - }, - res.expr(), - )); + if dst_face.whatami != WhatAmI::Client { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + )); + } else { + let matching_interests = face_hat!(dst_face) + .remote_sub_interests + .values() + .filter(|si| si.0.as_ref().map(|si| si.matches(res)).unwrap_or(true)) + .cloned() + .collect::>, bool)>>(); + + for (int_res, aggregate) in matching_interests { + let res = if aggregate { + int_res.as_ref().unwrap_or(res) + } else { + res + }; + if !face_hat!(dst_face).local_subs.contains_key(res) { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + )); + } + } + } } } @@ -272,18 +314,11 @@ fn register_client_subscription( } } None => { - res.session_ctxs.insert( - face.id, - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: Some(*sub_info), - qabl: None, - in_interceptor_cache: None, - e_interceptor_cache: None, - }), - ); + let ctx = res + .session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))); + get_mut_unchecked(ctx).subs = Some(*sub_info); } } } @@ -334,6 +369,13 @@ fn client_subs(res: &Arc) -> Vec> { .collect() } +#[inline] +fn remote_client_subs(res: &Arc, face: &Arc) -> bool { + res.session_ctxs + .values() + .any(|ctx| ctx.face.id != face.id && ctx.subs.is_some()) +} + #[inline] fn 
send_forget_sourced_subscription_to_net_childs( tables: &Tables, @@ -374,8 +416,8 @@ fn send_forget_sourced_subscription_to_net_childs( } fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { - for face in tables.faces.values_mut() { - if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { + for mut face in tables.faces.values().cloned() { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { interest_id: None, @@ -390,6 +432,37 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc res.expr(), )); } + for res in face_hat!(&mut face) + .local_subs + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_client_subs(&m, &face) + || remote_peer_subs(tables, &m) + || remote_router_subs(tables, &m)) + }) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } } } @@ -563,8 +636,9 @@ pub(super) fn undeclare_client_subscription( } else { propagate_forget_simple_subscription_to_peers(tables, res); } + if client_subs.len() == 1 && !router_subs && !peer_subs { - let face = &mut client_subs[0]; + let mut face = &mut client_subs[0]; if !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( @@ -581,6 +655,37 @@ pub(super) fn undeclare_client_subscription( res.expr(), )); } + for res in face_hat!(face) + .local_subs + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_client_subs(&m, face) + || remote_peer_subs(tables, &m) + || remote_router_subs(tables, &m)) + }) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } } } } @@ -604,27 +709,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers }; - if face.whatami == WhatAmI::Client { - for sub in &hat!(tables).router_subs { - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face).local_subs.insert(sub.clone(), id); - let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); - } - } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + if face.whatami == WhatAmI::Peer && 
!hat!(tables).full_net(WhatAmI::Peer) { for sub in &hat!(tables).router_subs { if sub.context.is_some() && (res_hat!(sub).router_subs.iter().any(|r| *r != tables.zid) @@ -826,40 +911,135 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: } } -#[inline] -fn insert_faces_for_subs( - route: &mut Route, - expr: &RoutingExpr, - tables: &Tables, - net: &Network, - source: NodeId, - subs: &HashSet, -) { - if net.trees.len() > source as usize { - for sub in subs { - if let Some(sub_idx) = net.get_idx(sub) { - if net.trees[source as usize].directions.len() > sub_idx.index() { - if let Some(direction) = net.trees[source as usize].directions[sub_idx.index()] - { - if net.graph.contains_node(direction) { - if let Some(face) = tables.get_face(&net.graph[direction].zid) { - route.entry(face.id).or_insert_with(|| { - let key_expr = - Resource::get_best_key(expr.prefix, expr.suffix, face.id); - (face.clone(), key_expr.to_owned(), source) - }); - } +impl HatPubSubTrait for HatCode { + fn declare_sub_interest( + &self, + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, + ) { + if mode.current() && face.whatami == WhatAmI::Client { + let interest_id = mode.future().then_some(id); + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers + }; + if let Some(res) = res.as_ref() { + if aggregate { + if hat!(tables).router_subs.iter().any(|sub| { + sub.context.is_some() + && sub.matches(res) + && (remote_client_subs(sub, face) + || remote_peer_subs(tables, sub) + || remote_router_subs(tables, sub)) + }) { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert((*res).clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + res.expr(), + )); + } + } else { + for sub in &hat!(tables).router_subs { + if sub.context.is_some() + && sub.matches(res) + && (remote_client_subs(sub, face) + || remote_peer_subs(tables, sub) + || remote_router_subs(tables, sub)) + { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); } } } + } else { + for sub in &hat!(tables).router_subs { + if sub.context.is_some() + && (remote_client_subs(sub, face) + || remote_peer_subs(tables, sub) + || remote_router_subs(tables, sub)) + { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: 
ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); + } + } } } - } else { - tracing::trace!("Tree for node sid:{} not yet ready", source); + if mode.future() { + face_hat_mut!(face) + .remote_sub_interests + .insert(id, (res.cloned(), aggregate)); + } + } + + fn undeclare_sub_interest( + &self, + _tables: &mut Tables, + face: &mut Arc, + id: InterestId, + ) { + face_hat_mut!(face).remote_sub_interests.remove(&id); } -} -impl HatPubSubTrait for HatCode { fn declare_subscription( &self, tables: &mut Tables, @@ -973,6 +1153,43 @@ impl HatPubSubTrait for HatCode { source: NodeId, source_type: WhatAmI, ) -> Arc { + #[inline] + fn insert_faces_for_subs( + route: &mut Route, + expr: &RoutingExpr, + tables: &Tables, + net: &Network, + source: NodeId, + subs: &HashSet, + ) { + if net.trees.len() > source as usize { + for sub in subs { + if let Some(sub_idx) = net.get_idx(sub) { + if net.trees[source as usize].directions.len() > sub_idx.index() { + if let Some(direction) = + net.trees[source as usize].directions[sub_idx.index()] + { + if net.graph.contains_node(direction) { + if let Some(face) = tables.get_face(&net.graph[direction].zid) { + route.entry(face.id).or_insert_with(|| { + let key_expr = Resource::get_best_key( + expr.prefix, + expr.suffix, + face.id, + ); + (face.clone(), key_expr.to_owned(), source) + }); + } + } + } + } + } + } + } else { + tracing::trace!("Tree for node sid:{} not yet ready", source); + } + } + let mut route = HashMap::new(); let key_expr = expr.full_expr(); if key_expr.ends_with('/') { @@ -1064,4 +1281,91 @@ impl HatPubSubTrait for HatCode { fn get_data_routes_entries(&self, tables: &Tables) -> RoutesIndexes { get_routes_entries(tables) } + + fn get_matching_subscriptions( + &self, + tables: &Tables, + key_expr: &KeyExpr<'_>, + ) -> HashMap> { + #[inline] + fn insert_faces_for_subs( + route: &mut HashMap>, + tables: &Tables, + net: &Network, + source: usize, + subs: &HashSet, + ) { + if net.trees.len() > source { + for sub in subs { + if let Some(sub_idx) = net.get_idx(sub) { + if net.trees[source].directions.len() > sub_idx.index() { + if let Some(direction) = net.trees[source].directions[sub_idx.index()] { + if net.graph.contains_node(direction) { + if let Some(face) = tables.get_face(&net.graph[direction].zid) { + route.entry(face.id).or_insert_with(|| face.clone()); + } + } + } + } + } + } + } else { + tracing::trace!("Tree for node sid:{} not yet ready", source); + } + } + + let mut matching_subscriptions = HashMap::new(); + if key_expr.ends_with('/') { + return matching_subscriptions; + } + tracing::trace!("get_matching_subscriptions({})", key_expr,); + + let res = Resource::get_resource(&tables.root_res, key_expr); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, key_expr))); + + let master = !hat!(tables).full_net(WhatAmI::Peer) + || *hat!(tables).elect_router(&tables.zid, key_expr, hat!(tables).shared_nodes.iter()) + == tables.zid; + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + + if master { + let net = hat!(tables).routers_net.as_ref().unwrap(); + insert_faces_for_subs( + &mut matching_subscriptions, + tables, + net, + net.idx.index(), + &res_hat!(mres).router_subs, + ); + } + + if hat!(tables).full_net(WhatAmI::Peer) { + let net = 
hat!(tables).peers_net.as_ref().unwrap(); + insert_faces_for_subs( + &mut matching_subscriptions, + tables, + net, + net.idx.index(), + &res_hat!(mres).peer_subs, + ); + } + + if master { + for (sid, context) in &mres.session_ctxs { + if context.subs.is_some() && context.face.whatami != WhatAmI::Router { + matching_subscriptions + .entry(*sid) + .or_insert_with(|| context.face.clone()); + } + } + } + } + matching_subscriptions + } } diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 9defb80081..72e3a781e5 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -28,9 +28,12 @@ use zenoh_protocol::{ }, WhatAmI, WireExpr, ZenohId, }, - network::declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareQueryable, QueryableId, UndeclareQueryable, + network::{ + declare::{ + common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, + DeclareBody, DeclareQueryable, QueryableId, UndeclareQueryable, + }, + interest::{InterestId, InterestMode}, }, }; use zenoh_sync::get_mut_unchecked; @@ -46,7 +49,7 @@ use crate::net::routing::{ resource::{NodeId, Resource, SessionContext}, tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr, Tables}, }, - hat::{HatQueriesTrait, Sources}, + hat::{CurrentFutureTrait, HatQueriesTrait, Sources}, router::RoutesIndexes, RoutingContext, PREFIX_LIVELINESS, }; @@ -238,6 +241,11 @@ fn propagate_simple_queryable( let current = face_hat!(dst_face).local_qabls.get(res); if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) && (current.is_none() || current.unwrap().1 != info) + && (dst_face.whatami != WhatAmI::Client + || face_hat!(dst_face) + .remote_qabl_interests + .values() + .any(|si| si.as_ref().map(|si| si.matches(res)).unwrap_or(true))) && if full_peers_net { dst_face.whatami == WhatAmI::Client } else { @@ -404,17 +412,11 @@ fn register_client_queryable( // Register queryable { let res = get_mut_unchecked(res); - get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: None, - qabl: None, - in_interceptor_cache: None, - e_interceptor_cache: None, - }) - })) + get_mut_unchecked( + res.session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))), + ) .qabl = Some(*qabl_info); } face_hat_mut!(face).remote_qabls.insert(id, res.clone()); @@ -465,6 +467,13 @@ fn client_qabls(res: &Arc) -> Vec> { .collect() } +#[inline] +fn remote_client_qabls(res: &Arc, face: &Arc) -> bool { + res.session_ctxs + .values() + .any(|ctx| ctx.face.id != face.id && ctx.qabl.is_some()) +} + #[inline] fn send_forget_sourced_queryable_to_net_childs( tables: &Tables, @@ -505,8 +514,8 @@ fn send_forget_sourced_queryable_to_net_childs( } fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { - for face in tables.faces.values_mut() { - if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { + for mut face in tables.faces.values().cloned() { + if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { interest_id: None, @@ -521,6 +530,37 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && 
(remote_client_qabls(&m, &face) + || remote_peer_qabls(tables, &m) + || remote_router_qabls(tables, &m)) + }) + }) { + if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } } } @@ -707,7 +747,7 @@ pub(super) fn undeclare_client_queryable( } if client_qabls.len() == 1 && !router_qabls && !peer_qabls { - let face = &mut client_qabls[0]; + let mut face = &mut client_qabls[0]; if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { face.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -723,6 +763,37 @@ pub(super) fn undeclare_client_queryable( res.expr(), )); } + for res in face_hat!(face) + .local_qabls + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_client_qabls(&m, face) + || remote_peer_qabls(tables, &m) + || remote_router_qabls(tables, &m)) + }) + }) { + if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(&res) { + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + )); + } + } + } } } } @@ -741,32 +812,7 @@ fn forget_client_queryable( } pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { - if face.whatami == WhatAmI::Client { - for qabl in hat!(tables).router_qabls.iter() { - if qabl.context.is_some() { - let info = local_qabl_info(tables, qabl, face); - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face) - .local_qabls - .insert(qabl.clone(), (id, info)); - let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - interest_id: None, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); - } - } - } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { for qabl in hat!(tables).router_qabls.iter() { if qabl.context.is_some() && (res_hat!(qabl).router_qabls.keys().any(|r| *r != tables.zid) @@ -864,7 +910,8 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links { let dst_face = &mut get_mut_unchecked(ctx).face; if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { - if let Some(id) = face_hat!(dst_face).local_subs.get(res).cloned() { + if let Some((id, _)) = face_hat!(dst_face).local_qabls.get(res).cloned() + { let forget = !HatTables::failover_brokering_to(links, dst_face.zid) && { let ctx_links = hat!(tables) @@ -1021,6 +1068,140 @@ lazy_static::lazy_static! 
{ } impl HatQueriesTrait for HatCode { + fn declare_qabl_interest( + &self, + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, + ) { + if mode.current() && face.whatami == WhatAmI::Client { + let interest_id = mode.future().then_some(id); + if let Some(res) = res.as_ref() { + if aggregate { + if hat!(tables).router_qabls.iter().any(|qabl| { + qabl.context.is_some() + && qabl.matches(res) + && (remote_client_qabls(qabl, face) + || remote_peer_qabls(tables, qabl) + || remote_router_qabls(tables, qabl)) + }) { + let info = local_qabl_info(tables, res, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert((*res).clone(), (id, info)); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr, + ext_info: info, + }), + }, + res.expr(), + )); + } + } else { + for qabl in hat!(tables).router_qabls.iter() { + if qabl.context.is_some() + && qabl.matches(res) + && (remote_client_qabls(qabl, face) + || remote_peer_qabls(tables, qabl) + || remote_router_qabls(tables, qabl)) + { + let info = local_qabl_info(tables, qabl, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); + id + } else { + 0 + }; + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); + } + } + } + } else { + for qabl in hat!(tables).router_qabls.iter() { + if qabl.context.is_some() + && (remote_client_qabls(qabl, face) + || remote_peer_qabls(tables, qabl) + || remote_router_qabls(tables, qabl)) + { + let info = local_qabl_info(tables, qabl, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); + id + } else { + 0 + }; + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); + } + } + } + } + if mode.future() { + face_hat_mut!(face) + .remote_qabl_interests + .insert(id, res.cloned()); + } + } + + fn undeclare_qabl_interest( + &self, + _tables: &mut Tables, + face: &mut Arc, + id: InterestId, + ) { + face_hat_mut!(face).remote_qabl_interests.remove(&id); + } + fn declare_queryable( &self, tables: &mut Tables, diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 62f6b7c8b4..12c1f26fdb 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -499,6 +499,11 @@ impl Primitives for AdminSpace { } impl crate::net::primitives::EPrimitives for AdminSpace { + #[inline] + fn 
send_interest(&self, ctx: crate::net::routing::RoutingContext) { + (self as &dyn Primitives).send_interest(ctx.msg) + } + #[inline] fn send_declare(&self, ctx: crate::net::routing::RoutingContext) { (self as &dyn Primitives).send_declare(ctx.msg) diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index 5f04b73d53..1622c1eb52 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -534,6 +534,8 @@ impl Primitives for ClientPrimitives { } impl EPrimitives for ClientPrimitives { + fn send_interest(&self, _ctx: RoutingContext) {} + fn send_declare(&self, ctx: RoutingContext) { match ctx.msg.body { DeclareBody::DeclareKeyExpr(d) => { From 4e16dc9ae23ee463a19a4740507f7548b37feef1 Mon Sep 17 00:00:00 2001 From: yellowhatter Date: Tue, 14 May 2024 00:13:55 +0300 Subject: [PATCH 343/357] add forgotten types to prelude --- zenoh/src/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index e48388f5e7..e9da14328f 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -397,6 +397,7 @@ pub mod shm { protocol_id::POSIX_PROTOCOL_ID, }, provider::{ + chunk::{AllocatedChunk, ChunkDescriptor}, shared_memory_provider::{ AllocBuilder, AllocBuilder2, AllocLayout, AllocLayoutSizedBuilder, AllocPolicy, AsyncAllocPolicy, BlockOn, DeallocEldest, DeallocOptimal, DeallocYoungest, @@ -405,6 +406,7 @@ pub mod shm { SharedMemoryProviderBuilderBackendID, SharedMemoryProviderBuilderID, StaticProtocolID, }, + shared_memory_provider_backend::SharedMemoryProviderBackend, types::{ AllocAlignment, BufAllocResult, BufLayoutAllocResult, ChunkAllocResult, MemoryLayout, ZAllocError, ZLayoutAllocError, ZLayoutError, From c853ebaead550f0179b110ca24325759e7e95d6e Mon Sep 17 00:00:00 2001 From: Mahmoud Mazouz Date: Tue, 14 May 2024 14:23:15 +0200 Subject: [PATCH 344/357] Apply Clippy lints from Rust 1.78 (#1020) * fix: Add step to update Stable Rust toolchain When GitHub runners images update their Rust stable channel, this update may not be propagated to all runners at the same time. In such a case CI jobs will use different Rust versions which leads to inconsistent behavior. 
* fix: Apply clippy lints * fix: Restore deleted `utfdbg` macro * fix: Workaround `clippy:assigning_clones` in `KeyExprFuzzer` --- .github/workflows/ci.yml | 3 ++ Cargo.toml | 4 +-- commons/zenoh-codec/src/lib.rs | 12 ++++++++ commons/zenoh-codec/src/transport/batch.rs | 6 ++++ commons/zenoh-keyexpr/src/key_expr/fuzzer.rs | 4 +-- commons/zenoh-keyexpr/src/key_expr/utils.rs | 30 ++++--------------- .../src/keyexpr_tree/box_tree.rs | 26 +--------------- .../zenoh-keyexpr/src/keyexpr_tree/test.rs | 6 ++-- .../zenoh-protocol/src/common/extension.rs | 6 ++++ commons/zenoh-protocol/src/zenoh/mod.rs | 6 ++++ .../src/unicast/lowlatency/transport.rs | 5 ---- .../src/unicast/transport_unicast_inner.rs | 1 - .../src/unicast/universal/transport.rs | 28 ----------------- plugins/zenoh-backend-traits/src/lib.rs | 1 - plugins/zenoh-plugin-rest/src/lib.rs | 20 ------------- .../src/replica/align_queryable.rs | 8 ++--- zenoh/src/net/primitives/mod.rs | 4 --- zenoh/src/net/primitives/mux.rs | 8 ----- zenoh/src/net/routing/dispatcher/face.rs | 2 +- zenoh/src/net/routing/hat/client/mod.rs | 4 --- .../src/net/routing/hat/linkstate_peer/mod.rs | 4 --- .../net/routing/hat/linkstate_peer/network.rs | 6 ++-- zenoh/src/net/routing/hat/mod.rs | 2 -- zenoh/src/net/routing/hat/p2p_peer/gossip.rs | 4 +-- zenoh/src/net/routing/hat/p2p_peer/mod.rs | 4 --- zenoh/src/net/routing/hat/router/mod.rs | 4 --- zenoh/src/net/routing/hat/router/network.rs | 6 ++-- zenoh/src/net/runtime/adminspace.rs | 5 ---- zenoh/src/net/tests/tables.rs | 2 -- zenoh/src/session.rs | 7 +---- 30 files changed, 60 insertions(+), 168 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a8552d58dd..1ef3710bd0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -41,6 +41,9 @@ jobs: - name: Setup rust-cache uses: Swatinem/rust-cache@v2 + - name: Update Stable Rust toolchain + run: rustup update stable + - name: Install dependencies run: cargo +stable install cargo-deny --locked diff --git a/Cargo.toml b/Cargo.toml index dc24991488..9036521c78 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -56,7 +56,7 @@ members = [ exclude = ["ci/nostd-check", "ci/valgrind-check"] [workspace.package] -rust-version = "1.66.1" +rust-version = "1.72.0" version = "0.11.0-dev" # Zenoh version repository = "https://github.com/eclipse-zenoh/zenoh" homepage = "http://zenoh.io" @@ -91,7 +91,7 @@ crc = "3.0.1" criterion = "0.5" derive_more = "0.99.17" derive-new = "0.6.0" -tracing-subscriber = {version = "0.3", features = ["json", "env-filter"]} +tracing-subscriber = { version = "0.3", features = ["json", "env-filter"] } tracing-loki = "0.2" event-listener = "4.0.0" flume = "0.11" diff --git a/commons/zenoh-codec/src/lib.rs b/commons/zenoh-codec/src/lib.rs index 14b41f6b0b..9e0193f9e2 100644 --- a/commons/zenoh-codec/src/lib.rs +++ b/commons/zenoh-codec/src/lib.rs @@ -48,6 +48,12 @@ pub trait LCodec { #[derive(Clone, Copy)] pub struct Zenoh080; +impl Default for Zenoh080 { + fn default() -> Self { + Self::new() + } +} + impl Zenoh080 { pub const fn new() -> Self { Self @@ -119,6 +125,12 @@ pub struct Zenoh080Bounded { _t: PhantomData, } +impl Default for Zenoh080Bounded { + fn default() -> Self { + Self::new() + } +} + impl Zenoh080Bounded { pub const fn new() -> Self { Self { _t: PhantomData } diff --git a/commons/zenoh-codec/src/transport/batch.rs b/commons/zenoh-codec/src/transport/batch.rs index 525336d6e8..d0774e0f30 100644 --- a/commons/zenoh-codec/src/transport/batch.rs +++ b/commons/zenoh-codec/src/transport/batch.rs @@ 
-53,6 +53,12 @@ pub struct Zenoh080Batch { pub latest_sn: LatestSn, } +impl Default for Zenoh080Batch { + fn default() -> Self { + Self::new() + } +} + impl Zenoh080Batch { pub const fn new() -> Self { Self { diff --git a/commons/zenoh-keyexpr/src/key_expr/fuzzer.rs b/commons/zenoh-keyexpr/src/key_expr/fuzzer.rs index 869b7b63a1..c764acc8bb 100644 --- a/commons/zenoh-keyexpr/src/key_expr/fuzzer.rs +++ b/commons/zenoh-keyexpr/src/key_expr/fuzzer.rs @@ -56,8 +56,8 @@ impl Iterator for KeyExprFuzzer { let mut next = Vec::new(); make(&mut next, &mut self.0); let mut next = String::from_utf8(next).unwrap(); - if let Some(n) = next.strip_prefix('/') { - next = n.to_owned() + if let Some(n) = next.strip_prefix('/').map(ToOwned::to_owned) { + next = n } Some(OwnedKeyExpr::autocanonize(next).unwrap()) } diff --git a/commons/zenoh-keyexpr/src/key_expr/utils.rs b/commons/zenoh-keyexpr/src/key_expr/utils.rs index 68a24f798a..628477174a 100644 --- a/commons/zenoh-keyexpr/src/key_expr/utils.rs +++ b/commons/zenoh-keyexpr/src/key_expr/utils.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use core::{ptr, str}; +use core::ptr; pub(crate) struct Writer { pub ptr: *mut u8, @@ -118,7 +118,6 @@ impl<'a, S: Split + ?Sized, D: ?Sized> DoubleEndedIterator for Splitter<'a, S pub trait Split { fn split_once<'a>(&'a self, delimiter: &Delimiter) -> (&'a Self, &'a Self); fn try_split_once<'a>(&'a self, delimiter: &Delimiter) -> (&'a Self, Option<&'a Self>); - fn rsplit_once<'a>(&'a self, delimiter: &Delimiter) -> (&'a Self, &'a Self); fn try_rsplit_once<'a>(&'a self, delimiter: &Delimiter) -> (Option<&'a Self>, &'a Self); fn splitter<'a>(&'a self, delimiter: &'a Delimiter) -> Splitter<'a, Self, Delimiter> { Splitter { @@ -134,12 +133,6 @@ impl Split for [u8] { None => (self, &[]), } } - fn rsplit_once<'a>(&'a self, delimiter: &u8) -> (&'a Self, &'a Self) { - match self.iter().rposition(|c| c == delimiter) { - Some(i) => (&self[..i], &self[(i + 1)..]), - None => (&[], self), - } - } fn try_split_once<'a>(&'a self, delimiter: &u8) -> (&'a Self, Option<&'a Self>) { match self.iter().position(|c| c == delimiter) { @@ -164,14 +157,6 @@ impl Split<[u8]> for [u8] { } (self, &[]) } - fn rsplit_once<'a>(&'a self, delimiter: &[u8]) -> (&'a Self, &'a Self) { - for i in (0..self.len()).rev() { - if self[..i].ends_with(delimiter) { - return (&self[..(i - delimiter.len())], &self[i..]); - } - } - (&[], self) - } fn try_split_once<'a>(&'a self, delimiter: &[u8]) -> (&'a Self, Option<&'a Self>) { for i in 0..self.len() { @@ -200,14 +185,6 @@ impl Split<[u8; N]> for [u8] { } (self, &[]) } - fn rsplit_once<'a>(&'a self, delimiter: &[u8; N]) -> (&'a Self, &'a Self) { - for i in (0..self.len()).rev() { - if self[..i].ends_with(delimiter) { - return (&self[..(i - delimiter.len())], &self[i..]); - } - } - (&[], self) - } fn try_split_once<'a>(&'a self, delimiter: &[u8; N]) -> (&'a Self, Option<&'a Self>) { for i in 0..self.len() { @@ -228,12 +205,15 @@ impl Split<[u8; N]> for [u8] { } } +#[allow(dead_code)] pub(crate) trait Utf { fn utf(&self) -> &str; } + +#[allow(dead_code)] impl Utf for [u8] { fn utf(&self) -> &str { - unsafe { str::from_utf8_unchecked(self) } + unsafe { ::core::str::from_utf8_unchecked(self) } } } /// This macro is generally useful when introducing new matchers to debug them. 
diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs index 5aa23e78ac..67777aaa90 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs @@ -18,12 +18,8 @@ use alloc::string::String; use core::ptr::NonNull; use crate::keyexpr; -use crate::keyexpr_tree::{ - support::{IWildness, NonWild, UnknownWildness}, - *, -}; +use crate::keyexpr_tree::{support::IWildness, *}; -use super::impls::KeyedSetProvider; use super::support::IterOrOption; /// A fully owned KeTree. @@ -402,26 +398,6 @@ impl>> AsMut< } } -trait TransmuteInto { - fn transmute_into(self) -> T; -} -impl<'a, Weight: 'static> - TransmuteInto<&'a mut KeyExprTreeNode> - for &'a mut KeyExprTreeNode -{ - fn transmute_into(self) -> &'a mut KeyExprTreeNode { - unsafe { core::mem::transmute(self) } - } -} -impl<'a, Weight: 'static> - TransmuteInto<&'a KeyExprTreeNode> - for &'a KeyExprTreeNode -{ - fn transmute_into(self) -> &'a KeyExprTreeNode { - unsafe { core::mem::transmute(self) } - } -} - impl< 'a, K: AsRef, diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/test.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/test.rs index fc2372a67b..2d916d9a84 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/test.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/test.rs @@ -163,7 +163,7 @@ fn test_keyset + Debug>(keys: &[K]) { assert!(expected.insert(k, v).is_none()); } } - exclone = expected.clone(); + exclone.clone_from(&expected); for node in tree.included_nodes(target) { let ke = node.keyexpr(); let weight = node.weight(); @@ -203,7 +203,7 @@ fn test_keyset + Debug>(keys: &[K]) { assert!(expected.insert(k, v).is_none()); } } - exclone = expected.clone(); + exclone.clone_from(&expected); for node in tree.nodes_including(dbg!(target)) { let ke = node.keyexpr(); let weight = node.weight(); @@ -302,7 +302,7 @@ fn test_keyset_vec>(keys: &[K]) { assert!(expected.insert(k, v).is_none()); } } - exclone = expected.clone(); + exclone.clone_from(&expected); for node in tree.included_nodes(target) { let ke = node.keyexpr(); let weight = node.weight(); diff --git a/commons/zenoh-protocol/src/common/extension.rs b/commons/zenoh-protocol/src/common/extension.rs index f61df61cc6..3c0fd881df 100644 --- a/commons/zenoh-protocol/src/common/extension.rs +++ b/commons/zenoh-protocol/src/common/extension.rs @@ -110,6 +110,12 @@ pub struct DidntConvert; #[derive(Clone, Copy, PartialEq, Eq)] pub struct ZExtUnit; +impl Default for ZExtUnit<{ ID }> { + fn default() -> Self { + Self::new() + } +} + impl ZExtUnit<{ ID }> { pub const ID: u8 = ID; diff --git a/commons/zenoh-protocol/src/zenoh/mod.rs b/commons/zenoh-protocol/src/zenoh/mod.rs index e67576e673..f2c38c3a1d 100644 --- a/commons/zenoh-protocol/src/zenoh/mod.rs +++ b/commons/zenoh-protocol/src/zenoh/mod.rs @@ -219,6 +219,12 @@ pub mod ext { Self } } + #[cfg(feature = "shared-memory")] + impl Default for ShmType<{ ID }> { + fn default() -> Self { + Self::new() + } + } /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ diff --git a/io/zenoh-transport/src/unicast/lowlatency/transport.rs b/io/zenoh-transport/src/unicast/lowlatency/transport.rs index dcc9fc8476..cac2e1c4c2 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/transport.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/transport.rs @@ -262,11 +262,6 @@ impl TransportUnicastTrait for TransportUnicastLowlatency { /*************************************/ /* TERMINATION */ /*************************************/ - async fn close_link(&self, 
link: Link, reason: u8) -> ZResult<()> { - tracing::trace!("Closing link {} with peer: {}", link, self.config.zid); - self.finalize(reason).await - } - async fn close(&self, reason: u8) -> ZResult<()> { tracing::trace!("Closing transport with peer: {}", self.config.zid); self.finalize(reason).await diff --git a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs index f6dc39529d..789a2fe79d 100644 --- a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs +++ b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs @@ -80,7 +80,6 @@ pub(crate) trait TransportUnicastTrait: Send + Sync { /*************************************/ /* TERMINATION */ /*************************************/ - async fn close_link(&self, link: Link, reason: u8) -> ZResult<()>; async fn close(&self, reason: u8) -> ZResult<()>; fn add_debug_fields<'a, 'b: 'a, 'c>( diff --git a/io/zenoh-transport/src/unicast/universal/transport.rs b/io/zenoh-transport/src/unicast/universal/transport.rs index 58acd5c4b2..1004381a44 100644 --- a/io/zenoh-transport/src/unicast/universal/transport.rs +++ b/io/zenoh-transport/src/unicast/universal/transport.rs @@ -37,13 +37,6 @@ use zenoh_protocol::{ }; use zenoh_result::{bail, zerror, ZResult}; -macro_rules! zlinkget { - ($guard:expr, $link:expr) => { - // Compare LinkUnicast link to not compare TransportLinkUnicast direction - $guard.iter().find(|tl| tl.link == $link) - }; -} - macro_rules! zlinkindex { ($guard:expr, $link:expr) => { // Compare LinkUnicast link to not compare TransportLinkUnicast direction @@ -356,27 +349,6 @@ impl TransportUnicastTrait for TransportUnicastUniversal { /*************************************/ /* TERMINATION */ /*************************************/ - async fn close_link(&self, link: Link, reason: u8) -> ZResult<()> { - tracing::trace!("Closing link {} with peer: {}", link, self.config.zid); - - let transport_link_pipeline = zlinkget!(zread!(self.links), link) - .ok_or_else(|| zerror!("Cannot close Link {:?}: not found", link))? 
- .pipeline - .clone(); - - // Close message to be sent on the target link - let msg: TransportMessage = Close { - reason, - session: false, - } - .into(); - - transport_link_pipeline.push_transport_message(msg, Priority::Background); - - // Remove the link from the channel - self.del_link(link).await - } - async fn close(&self, reason: u8) -> ZResult<()> { tracing::trace!("Closing transport with peer: {}", self.config.zid); diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index 8b9fa359e0..1660d83c3d 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -183,7 +183,6 @@ pub enum History { All, } -/// pub enum StorageInsertionResult { Outdated, Inserted, diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index aff09f7198..fa5c3c6ac3 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -193,26 +193,6 @@ fn response(status: StatusCode, content_type: impl TryInto, body: &str) -> zenoh_plugin_trait::declare_plugin!(RestPlugin); pub struct RestPlugin {} -#[derive(Clone, Copy, Debug)] -struct StrError { - err: &'static str, -} -impl std::error::Error for StrError {} -impl std::fmt::Display for StrError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.err) - } -} -#[derive(Debug, Clone)] -struct StringError { - err: String, -} -impl std::error::Error for StringError {} -impl std::fmt::Display for StringError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.err) - } -} impl ZenohPlugin for RestPlugin {} diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 3a68aa3bda..76a086d352 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -177,11 +177,11 @@ impl AlignQueryable { fn parse_selector(&self, selector: Selector) -> Option { let properties = selector.parameters_stringmap().unwrap(); // note: this is a hashmap tracing::trace!("[ALIGN QUERYABLE] Properties are: {:?}", properties); - if properties.get(super::ERA).is_some() { + if properties.contains_key(super::ERA) { Some(AlignComponent::Era( EraType::from_str(properties.get(super::ERA).unwrap()).unwrap(), )) - } else if properties.get(super::INTERVALS).is_some() { + } else if properties.contains_key(super::INTERVALS) { let mut intervals = properties.get(super::INTERVALS).unwrap().to_string(); intervals.remove(0); intervals.pop(); @@ -191,7 +191,7 @@ impl AlignQueryable { .map(|x| x.parse::().unwrap()) .collect::>(), )) - } else if properties.get(super::SUBINTERVALS).is_some() { + } else if properties.contains_key(super::SUBINTERVALS) { let mut subintervals = properties.get(super::SUBINTERVALS).unwrap().to_string(); subintervals.remove(0); subintervals.pop(); @@ -201,7 +201,7 @@ impl AlignQueryable { .map(|x| x.parse::().unwrap()) .collect::>(), )) - } else if properties.get(super::CONTENTS).is_some() { + } else if properties.contains_key(super::CONTENTS) { let contents = serde_json::from_str(properties.get(super::CONTENTS).unwrap()).unwrap(); Some(AlignComponent::Contents(contents)) } else { diff --git a/zenoh/src/net/primitives/mod.rs b/zenoh/src/net/primitives/mod.rs index fd85280be0..30c96f400e 100644 --- a/zenoh/src/net/primitives/mod.rs +++ b/zenoh/src/net/primitives/mod.rs 
@@ -48,8 +48,6 @@ pub(crate) trait EPrimitives: Send + Sync { fn send_response(&self, ctx: RoutingContext); fn send_response_final(&self, ctx: RoutingContext); - - fn send_close(&self); } #[derive(Default)] @@ -80,8 +78,6 @@ impl EPrimitives for DummyPrimitives { fn send_response_final(&self, _ctx: RoutingContext) {} - fn send_close(&self) {} - fn as_any(&self) -> &dyn Any { self } diff --git a/zenoh/src/net/primitives/mux.rs b/zenoh/src/net/primitives/mux.rs index 4ef0b9eb44..3f96ae7890 100644 --- a/zenoh/src/net/primitives/mux.rs +++ b/zenoh/src/net/primitives/mux.rs @@ -290,10 +290,6 @@ impl EPrimitives for Mux { } } - fn send_close(&self) { - // self.handler.closing().await; - } - fn as_any(&self) -> &dyn std::any::Any { self } @@ -566,10 +562,6 @@ impl EPrimitives for McastMux { } } - fn send_close(&self) { - // self.handler.closing().await; - } - fn as_any(&self) -> &dyn std::any::Any { self } diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index a31eb9d8ab..f2def1d20a 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -106,7 +106,7 @@ impl FaceState { pub(crate) fn get_next_local_id(&self) -> ExprId { let mut id = 1; - while self.local_mappings.get(&id).is_some() || self.remote_mappings.get(&id).is_some() { + while self.local_mappings.contains_key(&id) || self.remote_mappings.contains_key(&id) { id += 1; } id diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index aa83c34f5d..c19faf39f8 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -252,10 +252,6 @@ impl HatBaseTrait for HatCode { Ok(()) } - fn as_any(&self) -> &dyn Any { - self - } - #[inline] fn ingress_filter(&self, _tables: &Tables, _face: &FaceState, _expr: &mut RoutingExpr) -> bool { true diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index 2bf22489de..d75c8faf1f 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -431,10 +431,6 @@ impl HatBaseTrait for HatCode { Ok(()) } - fn as_any(&self) -> &dyn Any { - self - } - #[inline] fn ingress_filter(&self, _tables: &Tables, _face: &FaceState, _expr: &mut RoutingExpr) -> bool { true diff --git a/zenoh/src/net/routing/hat/linkstate_peer/network.rs b/zenoh/src/net/routing/hat/linkstate_peer/network.rs index 3fd9f53420..16844643c4 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/network.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/network.rs @@ -460,10 +460,10 @@ impl Network { (oldsn < sn) .then(|| { node.sn = sn; - node.links = links.clone(); + node.links.clone_from(&links); changes.updated_nodes.push((idx, node.clone())); (node.locators != locators && locators.is_some()).then(|| { - node.locators = locators.clone(); + node.locators.clone_from(&locators); idx }) }) @@ -524,7 +524,7 @@ impl Network { let oldsn = node.sn; if oldsn < sn { node.sn = sn; - node.links = links.clone(); + node.links.clone_from(&links); if locators.is_some() { node.locators = locators; } diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index 2752a80959..82f2a6746e 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -67,8 +67,6 @@ impl Sources { pub(crate) trait HatTrait: HatBaseTrait + HatPubSubTrait + HatQueriesTrait {} pub(crate) trait HatBaseTrait { - fn as_any(&self) -> &dyn Any; - fn init(&self, 
tables: &mut Tables, runtime: Runtime); fn new_tables(&self, router_peers_failover_brokering: bool) -> Box; diff --git a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs index 8ce3bb4792..38a833d1c2 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs @@ -381,9 +381,9 @@ impl Network { (oldsn < sn) .then(|| { node.sn = sn; - node.links = links.clone(); + node.links.clone_from(&links); (node.locators != locators && locators.is_some()).then(|| { - node.locators = locators.clone(); + node.locators.clone_from(&locators); idx }) }) diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index ea3e1e9550..294932fe24 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -325,10 +325,6 @@ impl HatBaseTrait for HatCode { Ok(()) } - fn as_any(&self) -> &dyn Any { - self - } - #[inline] fn ingress_filter(&self, _tables: &Tables, _face: &FaceState, _expr: &mut RoutingExpr) -> bool { true diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 4aaec7bf3b..0762126edc 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -706,10 +706,6 @@ impl HatBaseTrait for HatCode { Ok(()) } - fn as_any(&self) -> &dyn Any { - self - } - #[inline] fn ingress_filter(&self, tables: &Tables, face: &FaceState, expr: &mut RoutingExpr) -> bool { face.whatami != WhatAmI::Peer diff --git a/zenoh/src/net/routing/hat/router/network.rs b/zenoh/src/net/routing/hat/router/network.rs index 486e0456ab..5089ce9893 100644 --- a/zenoh/src/net/routing/hat/router/network.rs +++ b/zenoh/src/net/routing/hat/router/network.rs @@ -463,10 +463,10 @@ impl Network { (oldsn < sn) .then(|| { node.sn = sn; - node.links = links.clone(); + node.links.clone_from(&links); changes.updated_nodes.push((idx, node.clone())); (node.locators != locators && locators.is_some()).then(|| { - node.locators = locators.clone(); + node.locators.clone_from(&locators); idx }) }) @@ -527,7 +527,7 @@ impl Network { let oldsn = node.sn; if oldsn < sn { node.sn = sn; - node.links = links.clone(); + node.links.clone_from(&links); if locators.is_some() { node.locators = locators; } diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index a7ac55baf9..78647b3836 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -509,11 +509,6 @@ impl crate::net::primitives::EPrimitives for AdminSpace { (self as &dyn Primitives).send_response_final(ctx.msg) } - #[inline] - fn send_close(&self) { - (self as &dyn Primitives).send_close() - } - fn as_any(&self) -> &dyn std::any::Any { self } diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index ebd6e66681..f5e65f0bdc 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -470,8 +470,6 @@ impl EPrimitives for ClientPrimitives { fn send_response_final(&self, _ctx: RoutingContext) {} - fn send_close(&self) {} - fn as_any(&self) -> &dyn std::any::Any { self } diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 9ff91ea990..368cded243 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -1698,7 +1698,7 @@ impl Session { let mut sample = Sample::with_info(key_expr, payload.clone(), info.clone()); #[cfg(feature = "unstable")] { - sample.attachment = attachment.clone(); + sample.attachment.clone_from(&attachment); } 
cb(sample); } @@ -2721,11 +2721,6 @@ impl crate::net::primitives::EPrimitives for Session { (self as &dyn Primitives).send_response_final(ctx.msg) } - #[inline] - fn send_close(&self) { - (self as &dyn Primitives).send_close() - } - fn as_any(&self) -> &dyn std::any::Any { self } From 763a05f0d61a894cf3cbeb07223e2ea726777325 Mon Sep 17 00:00:00 2001 From: Yuyuan Yuan Date: Tue, 14 May 2024 21:03:41 +0800 Subject: [PATCH 345/357] fix: use `self.len()` to prevent from consuming the slices iterator (#1022) --- commons/zenoh-buffers/src/lib.rs | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/commons/zenoh-buffers/src/lib.rs b/commons/zenoh-buffers/src/lib.rs index abd85d024c..8641ff4afc 100644 --- a/commons/zenoh-buffers/src/lib.rs +++ b/commons/zenoh-buffers/src/lib.rs @@ -106,17 +106,10 @@ pub mod buffer { // the iterator has 1 element. Cow::Borrowed(unsafe { slices.next().unwrap_unchecked() }) } - _ => { - let mut l = 0; - for s in slices.by_ref() { - l += s.len(); - } - let mut vec = Vec::with_capacity(l); - for slice in slices { - vec.extend_from_slice(slice); - } - Cow::Owned(vec) - } + _ => Cow::Owned(slices.fold(Vec::with_capacity(self.len()), |mut acc, it| { + acc.extend_from_slice(it); + acc + })), } } } From 7f7260c741709a20d1c520000dd88dcc894381ba Mon Sep 17 00:00:00 2001 From: Yuyuan Yuan Date: Wed, 15 May 2024 15:59:13 +0800 Subject: [PATCH 346/357] ci: speedup the cargo deny installation (#1027) --- .github/workflows/ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1ef3710bd0..07307e160e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -44,8 +44,8 @@ jobs: - name: Update Stable Rust toolchain run: rustup update stable - - name: Install dependencies - run: cargo +stable install cargo-deny --locked + - name: Install latest cargo-deny + uses: taiki-e/install-action@cargo-deny - name: Code format check run: cargo fmt --check From 75aa2739c36533f57b0eef3580256014e445f241 Mon Sep 17 00:00:00 2001 From: Diogo Matsubara Date: Wed, 15 May 2024 10:17:34 +0200 Subject: [PATCH 347/357] fix: set live-run to input's value in dockerhub publish (#1029) --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index e4e711ee27..25553c2b0a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -160,7 +160,7 @@ jobs: uses: eclipse-zenoh/ci/.github/workflows/release-crates-dockerhub.yml@main with: no-build: true - live-run: true + live-run: ${{ inputs.live-run || false }} version: ${{ needs.tag.outputs.version }} repo: ${{ github.repository }} branch: ${{ needs.tag.outputs.branch }} From ad6a97440c2c46306bacb8eca7bf2f6fc9a2d60e Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Wed, 15 May 2024 19:23:41 +0200 Subject: [PATCH 348/357] Reduce open time (#971) * Router implements interests protocol for clients * Send WireExpr in UndeclareSubscriber/UndeclareQueryable to clients for pico * Fix WireExprExt M flag encoding/decoding * Fix decl_key * Clients send all samples and queries to routers and peers * Avoid self declaration loop on interest * Fix query/replies copy/paste bugs * Peers implement interests protocol for clients * Don't send WireExpr in UndeclareSubscriber/UndeclareQueryable to clients * Add client writer-side filtering (#863) * Add client writer-side filtering * Reimplement liveliness with interests * Fix writer-side 
filtering before receiving FinalInterest * Fix pubsub interest based routing after router failover * Declare message can be Push/Request/RequestContinuous/Response * Address review comments * Remove F: Future flag from DeclareInterest * cargo fmt --all * Remove unused Interest flags field * Update doc * Remove unneeded interest_id field * Update commons/zenoh-protocol/src/network/declare.rs * Remove unused UndeclareInterest * Implement proper Declare Request/Response id correlation * Add new Interest network message * Update doc * Update codec * Fix stable build * Fix test_acl * Fix writer side filtering * Add separate functions to compute matching status * Fix unstable imports * Remove useless checks * Don't apply open session delay in client mode * Add open_delay test * Peers don't apply writer side filtering until FinalInterest is received * Don't wait for full scouting delay when peers connected all configured connect endpoints * Increase scouting delay and decrease api open delay * Wait for gossip and related connections attempts before returning to open * Remove random backoff for p2p * Fix memory leak * Remove API_OPEN_DELAY * Don't apply any scouting delay when multicast disabled and no configured connect/endpoints * Sleep for scouting/delay in router and linkstate peer modes --------- Co-authored-by: Luca Cominardi --- commons/zenoh-config/src/defaults.rs | 2 +- zenoh/Cargo.toml | 3 + zenoh/src/api/session.rs | 3 - zenoh/src/net/routing/hat/client/mod.rs | 5 + .../src/net/routing/hat/linkstate_peer/mod.rs | 6 + zenoh/src/net/routing/hat/p2p_peer/gossip.rs | 20 +- zenoh/src/net/routing/hat/p2p_peer/mod.rs | 42 +- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 21 +- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 23 +- zenoh/src/net/routing/hat/router/mod.rs | 6 + zenoh/src/net/runtime/mod.rs | 7 + zenoh/src/net/runtime/orchestrator.rs | 119 ++++- zenoh/tests/open_time.rs | 426 ++++++++++++++++++ 13 files changed, 657 insertions(+), 26 deletions(-) create mode 100644 zenoh/tests/open_time.rs diff --git a/commons/zenoh-config/src/defaults.rs b/commons/zenoh-config/src/defaults.rs index 865da7b5ba..138d00bf85 100644 --- a/commons/zenoh-config/src/defaults.rs +++ b/commons/zenoh-config/src/defaults.rs @@ -36,7 +36,7 @@ pub const mode: WhatAmI = WhatAmI::Peer; #[allow(dead_code)] pub mod scouting { pub const timeout: u64 = 3000; - pub const delay: u64 = 200; + pub const delay: u64 = 500; pub mod multicast { pub const enabled: bool = true; pub const address: ([u8; 4], u16) = ([224, 0, 0, 224], 7446); diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 15996ce620..c07f22fe9f 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -114,6 +114,9 @@ zenoh-util = { workspace = true } zenoh-runtime = { workspace = true } zenoh-task = { workspace = true } +[dev-dependencies] +tokio = { workspace = true } + [build-dependencies] rustc_version = { workspace = true } diff --git a/zenoh/src/api/session.rs b/zenoh/src/api/session.rs index e5087e693b..afb4a4b0d3 100644 --- a/zenoh/src/api/session.rs +++ b/zenoh/src/api/session.rs @@ -99,7 +99,6 @@ zconfigurable! 
{ pub(crate) static ref API_QUERY_RECEPTION_CHANNEL_SIZE: usize = 256; pub(crate) static ref API_REPLY_EMISSION_CHANNEL_SIZE: usize = 256; pub(crate) static ref API_REPLY_RECEPTION_CHANNEL_SIZE: usize = 256; - pub(crate) static ref API_OPEN_SESSION_DELAY: u64 = 500; } pub(crate) struct SessionState { @@ -865,8 +864,6 @@ impl Session { .await; session.owns_runtime = true; runtime.start().await?; - // Workaround for the declare_and_shoot problem - tokio::time::sleep(Duration::from_millis(*API_OPEN_SESSION_DELAY)).await; Ok(session) }) } diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index 921dc7554c..ccedf8d419 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -120,6 +120,11 @@ impl HatBaseTrait for HatCode { fn close_face(&self, tables: &TablesLock, face: &mut Arc) { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); + + face_hat_mut!(face).remote_sub_interests.clear(); + face_hat_mut!(face).local_subs.clear(); + face_hat_mut!(face).local_qabls.clear(); + let face = get_mut_unchecked(face); for res in face.remote_mappings.values_mut() { get_mut_unchecked(res).session_ctxs.remove(&face.id); diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index bb5aec4db1..85b65302f0 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -251,6 +251,12 @@ impl HatBaseTrait for HatCode { fn close_face(&self, tables: &TablesLock, face: &mut Arc) { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); + + face_hat_mut!(face).remote_sub_interests.clear(); + face_hat_mut!(face).local_subs.clear(); + face_hat_mut!(face).remote_qabl_interests.clear(); + face_hat_mut!(face).local_qabls.clear(); + let face = get_mut_unchecked(face); for res in face.remote_mappings.values_mut() { get_mut_unchecked(res).session_ctxs.remove(&face.id); diff --git a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs index 57b76fc086..79948d0f0b 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs @@ -14,7 +14,6 @@ use std::convert::TryInto; use petgraph::graph::NodeIndex; -use rand::Rng; use vec_map::VecMap; use zenoh_buffers::{ writer::{DidntWrite, HasWriter}, @@ -399,6 +398,11 @@ impl Network { if self.gossip { if let Some(idx) = idx { + zenoh_runtime::ZRuntime::Net.block_in_place( + strong_runtime + .start_conditions() + .add_peer_connector_zid(zid), + ); if self.gossip_multihop || self.links.values().any(|link| link.zid == zid) { self.send_on_links( vec![( @@ -424,12 +428,11 @@ impl Network { .await .is_none() { - // random backoff - let sleep_time = std::time::Duration::from_millis( - rand::thread_rng().gen_range(0..100), - ); - tokio::time::sleep(sleep_time).await; runtime.connect_peer(&zid, &locators).await; + runtime + .start_conditions() + .terminate_peer_connector_zid(zid) + .await; } }); } @@ -437,6 +440,11 @@ impl Network { } } } + zenoh_runtime::ZRuntime::Net.block_in_place( + strong_runtime + .start_conditions() + .terminate_peer_connector_zid(src), + ); } pub(super) fn add_link(&mut self, transport: TransportUnicast) -> usize { diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index 5ac77a3135..5485213c3c 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ 
-27,10 +27,14 @@ use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher}; use zenoh_protocol::{ common::ZExtBody, network::{ - declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, - interest::InterestId, + declare::{ + ext::{NodeIdType, QoSType}, + queryable::ext::QueryableInfoType, + QueryableId, SubscriberId, + }, + interest::{InterestId, InterestOptions}, oam::id::OAM_LINKSTATE, - Oam, + Declare, DeclareBody, DeclareFinal, Oam, }, }; use zenoh_result::ZResult; @@ -53,8 +57,9 @@ use crate::net::{ codec::Zenoh080Routing, protocol::linkstate::LinkStateList, routing::{ - dispatcher::face::Face, + dispatcher::face::{Face, InterestState}, router::{compute_data_routes, compute_query_routes, RoutesIndexes}, + RoutingContext, }, runtime::Runtime, }; @@ -157,14 +162,43 @@ impl HatBaseTrait for HatCode { net.add_link(transport.clone()); } } + if face.state.whatami == WhatAmI::Peer { + get_mut_unchecked(&mut face.state).local_interests.insert( + 0, + InterestState { + options: InterestOptions::ALL, + res: None, + finalized: false, + }, + ); + } + pubsub_new_face(tables, &mut face.state); queries_new_face(tables, &mut face.state); + + if face.state.whatami == WhatAmI::Peer { + face.state + .primitives + .send_declare(RoutingContext::new(Declare { + interest_id: Some(0), + ext_qos: QoSType::default(), + ext_tstamp: None, + ext_nodeid: NodeIdType::default(), + body: DeclareBody::DeclareFinal(DeclareFinal), + })); + } Ok(()) } fn close_face(&self, tables: &TablesLock, face: &mut Arc) { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); + + face_hat_mut!(face).remote_sub_interests.clear(); + face_hat_mut!(face).local_subs.clear(); + face_hat_mut!(face).remote_qabl_interests.clear(); + face_hat_mut!(face).local_qabls.clear(); + let face = get_mut_unchecked(face); for res in face.remote_mappings.values_mut() { get_mut_unchecked(res).session_ctxs.remove(&face.id); diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index 31172e2804..69cb1619b7 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -39,7 +39,7 @@ use crate::{ tables::{Route, RoutingExpr, Tables}, }, hat::{CurrentFutureTrait, HatPubSubTrait, Sources}, - router::RoutesIndexes, + router::{update_data_routes_from, RoutesIndexes}, RoutingContext, PREFIX_LIVELINESS, }, }; @@ -358,6 +358,10 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { } } } + // recompute routes + // TODO: disable data routes and recompute them in parallel to avoid holding + // tables write lock for a long time on peer conenction. 
+ update_data_routes_from(tables, &mut tables.root_res.clone()); } impl HatPubSubTrait for HatCode { @@ -565,6 +569,21 @@ impl HatPubSubTrait for HatCode { return Arc::new(route); } }; + + for face in tables.faces.values().filter(|f| { + f.whatami == WhatAmI::Peer + && !f + .local_interests + .get(&0) + .map(|i| i.finalized) + .unwrap_or(true) + }) { + route.entry(face.id).or_insert_with(|| { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + (face.clone(), key_expr.to_owned(), NodeId::default()) + }); + } + let res = Resource::get_resource(expr.prefix, expr.suffix); let matches = res .as_ref() diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index 1801f66c84..e986cfa16e 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -45,7 +45,7 @@ use crate::net::routing::{ tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr, Tables}, }, hat::{CurrentFutureTrait, HatQueriesTrait, Sources}, - router::RoutesIndexes, + router::{update_query_routes_from, RoutesIndexes}, RoutingContext, PREFIX_LIVELINESS, }; @@ -332,6 +332,10 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { } } } + // recompute routes + // TODO: disable query routes and recompute them in parallel to avoid holding + // tables write lock for a long time on peer conenction. + update_query_routes_from(tables, &mut tables.root_res.clone()); } lazy_static::lazy_static! { @@ -549,6 +553,23 @@ impl HatQueriesTrait for HatCode { return EMPTY_ROUTE.clone(); } }; + + for face in tables.faces.values().filter(|f| { + f.whatami == WhatAmI::Peer + && !f + .local_interests + .get(&0) + .map(|i| i.finalized) + .unwrap_or(true) + }) { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.push(QueryTargetQabl { + direction: (face.clone(), key_expr.to_owned(), NodeId::default()), + complete: 0, + distance: 0.5, + }); + } + let res = Resource::get_resource(expr.prefix, expr.suffix); let matches = res .as_ref() diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 54b132f665..40fbc2d588 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -424,6 +424,12 @@ impl HatBaseTrait for HatCode { fn close_face(&self, tables: &TablesLock, face: &mut Arc) { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); + + face_hat_mut!(face).remote_sub_interests.clear(); + face_hat_mut!(face).local_subs.clear(); + face_hat_mut!(face).remote_qabl_interests.clear(); + face_hat_mut!(face).local_qabls.clear(); + let face = get_mut_unchecked(face); for res in face.remote_mappings.values_mut() { get_mut_unchecked(res).session_ctxs.remove(&face.id); diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index f4eb0289ca..515f3f54be 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -54,6 +54,7 @@ use zenoh_transport::{ TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; +use self::orchestrator::StartConditions; use super::{primitives::DeMux, routing, routing::router::Router}; #[cfg(all(feature = "unstable", feature = "plugins"))] use crate::api::loader::{load_plugins, start_plugins}; @@ -78,6 +79,7 @@ pub(crate) struct RuntimeState { task_controller: TaskController, #[cfg(all(feature = "unstable", feature = "plugins"))] plugins_manager: Mutex, + start_conditions: Arc, } pub 
struct WeakRuntime { @@ -186,6 +188,7 @@ impl RuntimeBuilder { task_controller: TaskController::default(), #[cfg(all(feature = "unstable", feature = "plugins"))] plugins_manager: Mutex::new(plugins_manager), + start_conditions: Arc::new(StartConditions::default()), }), }; *handler.runtime.write().unwrap() = Runtime::downgrade(&runtime); @@ -354,6 +357,10 @@ impl Runtime { pub fn get_cancellation_token(&self) -> CancellationToken { self.state.task_controller.get_cancellation_token() } + + pub(crate) fn start_conditions(&self) -> &Arc { + &self.state.start_conditions + } } struct RuntimeTransportEventHandler { diff --git a/zenoh/src/net/runtime/orchestrator.rs b/zenoh/src/net/runtime/orchestrator.rs index 610f189b58..6a6263d6b0 100644 --- a/zenoh/src/net/runtime/orchestrator.rs +++ b/zenoh/src/net/runtime/orchestrator.rs @@ -18,7 +18,10 @@ use std::{ use futures::prelude::*; use socket2::{Domain, Socket, Type}; -use tokio::net::UdpSocket; +use tokio::{ + net::UdpSocket, + sync::{futures::Notified, Mutex, Notify}, +}; use zenoh_buffers::{ reader::{DidntRead, HasReader}, writer::HasWriter, @@ -48,6 +51,72 @@ pub enum Loop { Break, } +#[derive(Default, Debug)] +pub(crate) struct PeerConnector { + zid: Option, + terminated: bool, +} + +#[derive(Default, Debug)] +pub(crate) struct StartConditions { + notify: Notify, + peer_connectors: Mutex>, +} + +impl StartConditions { + pub(crate) fn notified(&self) -> Notified<'_> { + self.notify.notified() + } + + pub(crate) async fn add_peer_connector(&self) -> usize { + let mut peer_connectors = self.peer_connectors.lock().await; + peer_connectors.push(PeerConnector::default()); + peer_connectors.len() - 1 + } + + pub(crate) async fn add_peer_connector_zid(&self, zid: ZenohId) { + let mut peer_connectors = self.peer_connectors.lock().await; + if !peer_connectors.iter().any(|pc| pc.zid == Some(zid)) { + peer_connectors.push(PeerConnector { + zid: Some(zid), + terminated: false, + }) + } + } + + pub(crate) async fn set_peer_connector_zid(&self, idx: usize, zid: ZenohId) { + let mut peer_connectors = self.peer_connectors.lock().await; + if let Some(peer_connector) = peer_connectors.get_mut(idx) { + peer_connector.zid = Some(zid); + } + } + + pub(crate) async fn terminate_peer_connector(&self, idx: usize) { + let mut peer_connectors = self.peer_connectors.lock().await; + if let Some(peer_connector) = peer_connectors.get_mut(idx) { + peer_connector.terminated = true; + } + if !peer_connectors.iter().any(|pc| !pc.terminated) { + self.notify.notify_one() + } + } + + pub(crate) async fn terminate_peer_connector_zid(&self, zid: ZenohId) { + let mut peer_connectors = self.peer_connectors.lock().await; + if let Some(peer_connector) = peer_connectors.iter_mut().find(|pc| pc.zid == Some(zid)) { + peer_connector.terminated = true; + } else { + peer_connectors.push(PeerConnector { + zid: Some(zid), + terminated: true, + }) + } + if !peer_connectors.iter().any(|pc| !pc.terminated) { + self.notify.notify_one() + } + } +} + impl Runtime { pub async fn start(&mut self) -> ZResult<()> { match self.whatami() { @@ -96,7 +165,7 @@ impl Runtime { } async fn start_peer(&self) -> ZResult<()> { - let (listeners, peers, scouting, listen, autoconnect, addr, ifaces, delay) = { + let (listeners, peers, scouting, listen, autoconnect, addr, ifaces, delay, linkstate) = { let guard = &self.state.config.lock(); let listeners = if guard.listen().endpoints().is_empty() { let endpoint: EndPoint = PEER_DEFAULT_LISTENER.parse().unwrap(); @@ -125,6 +194,7 @@ impl Runtime { 
unwrap_or_default!(guard.scouting().multicast().address()), unwrap_or_default!(guard.scouting().multicast().interface()), Duration::from_millis(unwrap_or_default!(guard.scouting().delay())), + unwrap_or_default!(guard.routing().peer().mode()) == *"linkstate", ) }; @@ -135,12 +205,22 @@ impl Runtime { if scouting { self.start_scout(listen, autoconnect, addr, ifaces).await?; } - tokio::time::sleep(delay).await; + + if linkstate { + tokio::time::sleep(delay).await; + } else if (scouting || !peers.is_empty()) + && tokio::time::timeout(delay, self.state.start_conditions.notified()) + .await + .is_err() + && !peers.is_empty() + { + tracing::warn!("Scouting delay elapsed before start conditions are met."); + } Ok(()) } async fn start_router(&self) -> ZResult<()> { - let (listeners, peers, scouting, listen, autoconnect, addr, ifaces) = { + let (listeners, peers, scouting, listen, autoconnect, addr, ifaces, delay) = { let guard = self.state.config.lock(); let listeners = if guard.listen().endpoints().is_empty() { let endpoint: EndPoint = ROUTER_DEFAULT_LISTENER.parse().unwrap(); @@ -168,6 +248,7 @@ impl Runtime { *unwrap_or_default!(guard.scouting().multicast().autoconnect().router()), unwrap_or_default!(guard.scouting().multicast().address()), unwrap_or_default!(guard.scouting().multicast().interface()), + Duration::from_millis(unwrap_or_default!(guard.scouting().delay())), ) }; @@ -179,6 +260,7 @@ impl Runtime { self.start_scout(listen, autoconnect, addr, ifaces).await?; } + tokio::time::sleep(delay).await; Ok(()) } @@ -277,7 +359,7 @@ impl Runtime { } } else { // try to connect with retry waiting - self.peer_connector_retry(endpoint).await; + let _ = self.peer_connector_retry(endpoint).await; return Ok(()); } } @@ -309,7 +391,7 @@ impl Runtime { } } else if retry_config.exit_on_failure { // try to connect with retry waiting - self.peer_connector_retry(endpoint).await; + let _ = self.peer_connector_retry(endpoint).await; } else { // try to connect in background self.spawn_peer_connector(endpoint).await? @@ -656,14 +738,31 @@ impl Runtime { .await? 
{ let this = self.clone(); - self.spawn(async move { this.peer_connector_retry(peer).await }); + let idx = self.state.start_conditions.add_peer_connector().await; + let config = this.config().lock(); + let gossip = unwrap_or_default!(config.scouting().gossip().enabled()); + drop(config); + self.spawn(async move { + if let Ok(zid) = this.peer_connector_retry(peer).await { + this.state + .start_conditions + .set_peer_connector_zid(idx, zid) + .await; + } + if !gossip { + this.state + .start_conditions + .terminate_peer_connector(idx) + .await; + } + }); Ok(()) } else { bail!("Forbidden multicast endpoint in connect list!") } } - async fn peer_connector_retry(&self, peer: EndPoint) { + async fn peer_connector_retry(&self, peer: EndPoint) -> ZResult { let retry_config = self.get_connect_retry_config(&peer); let mut period = retry_config.period(); let cancellation_token = self.get_cancellation_token(); @@ -683,7 +782,7 @@ impl Runtime { *zwrite!(orch_transport.endpoint) = Some(peer); } } - break; + return transport.get_zid(); } Ok(Err(e)) => { tracing::debug!( @@ -703,7 +802,7 @@ impl Runtime { } } } - _ = cancellation_token.cancelled() => { break; } + _ = cancellation_token.cancelled() => { bail!(zerror!("Peer connector terminated")); } } tokio::time::sleep(period.next_duration()).await; } diff --git a/zenoh/tests/open_time.rs b/zenoh/tests/open_time.rs new file mode 100644 index 0000000000..87c080bc97 --- /dev/null +++ b/zenoh/tests/open_time.rs @@ -0,0 +1,426 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::{ + future::IntoFuture, + time::{Duration, Instant}, +}; + +use zenoh_config::Config; +use zenoh_link::EndPoint; +use zenoh_protocol::core::WhatAmI; + +const TIMEOUT_EXPECTED: Duration = Duration::from_secs(5); +const SLEEP: Duration = Duration::from_millis(100); + +macro_rules! 
ztimeout_expected { + ($f:expr) => { + tokio::time::timeout(TIMEOUT_EXPECTED, $f).await.unwrap() + }; +} + +async fn time_open( + listen_endpoint: &EndPoint, + connect_endpoint: &EndPoint, + connect_mode: WhatAmI, + lowlatency: bool, +) { + /* [ROUTER] */ + let mut router_config = Config::default(); + router_config.set_mode(Some(WhatAmI::Router)).unwrap(); + router_config + .listen + .set_endpoints(vec![listen_endpoint.clone()]) + .unwrap(); + router_config + .transport + .unicast + .set_lowlatency(lowlatency) + .unwrap(); + router_config + .transport + .unicast + .qos + .set_enabled(!lowlatency) + .unwrap(); + + let start = Instant::now(); + let router = ztimeout_expected!(zenoh::open(router_config).into_future()).unwrap(); + println!( + "open(mode:{}, listen_endpoint:{}, lowlatency:{}): {:#?}", + WhatAmI::Router, + listen_endpoint.as_str().split('#').next().unwrap(), + lowlatency, + start.elapsed() + ); + + /* [APP] */ + let mut app_config = Config::default(); + app_config.set_mode(Some(connect_mode)).unwrap(); + app_config + .connect + .set_endpoints(vec![connect_endpoint.clone()]) + .unwrap(); + app_config + .transport + .unicast + .set_lowlatency(lowlatency) + .unwrap(); + app_config + .transport + .unicast + .qos + .set_enabled(!lowlatency) + .unwrap(); + + /* [1] */ + // Open a transport from the app to the router + let start = Instant::now(); + let app = ztimeout_expected!(zenoh::open(app_config).into_future()).unwrap(); + println!( + "open(mode:{}, connect_endpoint:{}, lowlatency:{}): {:#?}", + connect_mode, + connect_endpoint.as_str().split('#').next().unwrap(), + lowlatency, + start.elapsed() + ); + + /* [2] */ + // Close the open transport on the app + let start = Instant::now(); + ztimeout_expected!(app.close().into_future()).unwrap(); + println!( + "close(mode:{}, connect_endpoint:{}, lowlatency:{}): {:#?}", + connect_mode, + connect_endpoint.as_str().split('#').next().unwrap(), + lowlatency, + start.elapsed() + ); + + /* [3] */ + // Close the router + let start = Instant::now(); + ztimeout_expected!(router.close().into_future()).unwrap(); + println!( + "close(mode:{}, listen_endpoint:{}, lowlatency:{}): {:#?}", + WhatAmI::Router, + listen_endpoint.as_str().split('#').next().unwrap(), + lowlatency, + start.elapsed() + ); + + // Wait a little bit + tokio::time::sleep(SLEEP).await; +} + +async fn time_universal_open(endpoint: &EndPoint, mode: WhatAmI) { + time_open(endpoint, endpoint, mode, false).await +} + +async fn time_lowlatency_open(endpoint: &EndPoint, mode: WhatAmI) { + time_open(endpoint, endpoint, mode, true).await +} + +#[cfg(feature = "transport_tcp")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_tcp_only_open() { + zenoh_util::try_init_log_from_env(); + let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 14000).parse().unwrap(); + time_universal_open(&endpoint, WhatAmI::Client).await; +} + +#[cfg(feature = "transport_tcp")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_tcp_only_with_lowlatency_open() { + zenoh_util::try_init_log_from_env(); + let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 14100).parse().unwrap(); + time_lowlatency_open(&endpoint, WhatAmI::Client).await; +} + +#[cfg(feature = "transport_udp")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_udp_only_open() { + zenoh_util::try_init_log_from_env(); + let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 14010).parse().unwrap(); + time_universal_open(&endpoint, 
WhatAmI::Client).await; +} + +#[cfg(feature = "transport_udp")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_udp_only_with_lowlatency_open() { + zenoh_util::try_init_log_from_env(); + let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 14110).parse().unwrap(); + time_lowlatency_open(&endpoint, WhatAmI::Client).await; +} + +// #[cfg(feature = "transport_ws")] +// #[tokio::test(flavor = "multi_thread", worker_threads = 4)] +// #[ignore] +// async fn time_ws_only_open() { +// zenoh_util::try_init_log_from_env(); +// let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 14020).parse().unwrap(); +// time_universal_open(&endpoint, WhatAmI::Client).await; +// } + +// #[cfg(feature = "transport_ws")] +// #[tokio::test(flavor = "multi_thread", worker_threads = 4)] +// #[ignore] +// async fn time_ws_only_with_lowlatency_open() { +// zenoh_util::try_init_log_from_env(); +// let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 14120).parse().unwrap(); +// time_lowlatency_open(&endpoint, WhatAmI::Client).await; +// } + +#[cfg(feature = "transport_unixpipe")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_unixpipe_only_open() { + zenoh_util::try_init_log_from_env(); + let endpoint: EndPoint = "unixpipe/time_unixpipe_only_open".parse().unwrap(); + time_universal_open(&endpoint, WhatAmI::Client).await; +} + +#[cfg(feature = "transport_unixpipe")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_unixpipe_only_with_lowlatency_open() { + zenoh_util::try_init_log_from_env(); + let endpoint: EndPoint = "unixpipe/time_unixpipe_only_with_lowlatency_open" + .parse() + .unwrap(); + time_lowlatency_open(&endpoint, WhatAmI::Client).await; +} + +#[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_unix_only_open() { + zenoh_util::try_init_log_from_env(); + let f1 = "zenoh-test-unix-socket-9-open.sock"; + let _ = std::fs::remove_file(f1); + let endpoint: EndPoint = format!("unixsock-stream/{f1}").parse().unwrap(); + time_universal_open(&endpoint, WhatAmI::Client).await; + let _ = std::fs::remove_file(f1); + let _ = std::fs::remove_file(format!("{f1}.lock")); +} + +#[cfg(feature = "transport_tls")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_tls_only_open() { + use zenoh_link::tls::config::*; + + zenoh_util::try_init_log_from_env(); + // NOTE: this an auto-generated pair of certificate and key. + // The target domain is localhost, so it has no real + // mapping to any existing domain. 
The certificate and key + // have been generated using: https://github.com/jsha/minica + let key = "-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAsfqAuhElN4HnyeqLovSd4Qe+nNv5AwCjSO+HFiF30x3vQ1Hi +qRA0UmyFlSqBnFH3TUHm4Jcad40QfrX8f11NKGZdpvKHsMYqYjZnYkRFGS2s4fQy +aDbV5M06s3UDX8ETPgY41Y8fCKTSVdi9iHkwcVrXMxUu4IBBx0C1r2GSo3gkIBnU +cELdFdaUOSbdCipJhbnkwixEr2h7PXxwba7SIZgZtRaQWak1VE9b716qe3iMuMha +Efo/UoFmeZCPu5spfwaOZsnCsxRPk2IjbzlsHTJ09lM9wmbEFHBMVAXejLTk++Sr +Xt8jASZhNen/2GzyLQNAquGn98lCMQ6SsE9vLQIDAQABAoIBAGQkKggHm6Q20L+4 +2+bNsoOqguLplpvM4RMpyx11qWE9h6GeUmWD+5yg+SysJQ9aw0ZSHWEjRD4ePji9 +lxvm2IIxzuIftp+NcM2gBN2ywhpfq9XbO/2NVR6PJ0dQQJzBG12bzKDFDdYkP0EU +WdiPL+WoEkvo0F57bAd77n6G7SZSgxYekBF+5S6rjbu5I1cEKW+r2vLehD4uFCVX +Q0Tu7TyIOE1KJ2anRb7ZXVUaguNj0/Er7EDT1+wN8KJKvQ1tYGIq/UUBtkP9nkOI +9XJd25k6m5AQPDddzd4W6/5+M7kjyVPi3CsQcpBPss6ueyecZOMaKqdWAHeEyaak +r67TofUCgYEA6GBa+YkRvp0Ept8cd5mh4gCRM8wUuhtzTQnhubCPivy/QqMWScdn +qD0OiARLAsqeoIfkAVgyqebVnxwTrKTvWe0JwpGylEVWQtpGz3oHgjST47yZxIiY +CSAaimi2CYnJZ+QB2oBkFVwNCuXdPEGX6LgnOGva19UKrm6ONsy6V9MCgYEAxBJu +fu4dGXZreARKEHa/7SQjI9ayAFuACFlON/EgSlICzQyG/pumv1FsMEiFrv6w7PRj +4AGqzyzGKXWVDRMrUNVeGPSKJSmlPGNqXfPaXRpVEeB7UQhAs5wyMrWDl8jEW7Ih +XcWhMLn1f/NOAKyrSDSEaEM+Nuu+xTifoAghvP8CgYEAlta9Fw+nihDIjT10cBo0 +38w4dOP7bFcXQCGy+WMnujOYPzw34opiue1wOlB3FIfL8i5jjY/fyzPA5PhHuSCT +Ec9xL3B9+AsOFHU108XFi/pvKTwqoE1+SyYgtEmGKKjdKOfzYA9JaCgJe1J8inmV +jwXCx7gTJVjwBwxSmjXIm+sCgYBQF8NhQD1M0G3YCdCDZy7BXRippCL0OGxVfL2R +5oKtOVEBl9NxH/3+evE5y/Yn5Mw7Dx3ZPHUcygpslyZ6v9Da5T3Z7dKcmaVwxJ+H +n3wcugv0EIHvOPLNK8npovINR6rGVj6BAqD0uZHKYYYEioQxK5rGyGkaoDQ+dgHm +qku12wKBgQDem5FvNp5iW7mufkPZMqf3sEGtu612QeqejIPFM1z7VkUgetsgPBXD +tYsqC2FtWzY51VOEKNpnfH7zH5n+bjoI9nAEAW63TK9ZKkr2hRGsDhJdGzmLfQ7v +F6/CuIw9EsAq6qIB8O88FXQqald+BZOx6AzB8Oedsz/WtMmIEmr/+Q== +-----END RSA PRIVATE KEY-----"; + + let cert = "-----BEGIN CERTIFICATE----- +MIIDLjCCAhagAwIBAgIIeUtmIdFQznMwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMxOFoYDzIxMjMw +MzA2MTYwMzE4WjAUMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCx+oC6ESU3gefJ6oui9J3hB76c2/kDAKNI74cWIXfT +He9DUeKpEDRSbIWVKoGcUfdNQebglxp3jRB+tfx/XU0oZl2m8oewxipiNmdiREUZ +Lazh9DJoNtXkzTqzdQNfwRM+BjjVjx8IpNJV2L2IeTBxWtczFS7ggEHHQLWvYZKj +eCQgGdRwQt0V1pQ5Jt0KKkmFueTCLESvaHs9fHBtrtIhmBm1FpBZqTVUT1vvXqp7 +eIy4yFoR+j9SgWZ5kI+7myl/Bo5mycKzFE+TYiNvOWwdMnT2Uz3CZsQUcExUBd6M +tOT75Kte3yMBJmE16f/YbPItA0Cq4af3yUIxDpKwT28tAgMBAAGjdjB0MA4GA1Ud +DwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0T +AQH/BAIwADAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzAUBgNVHREE +DTALgglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEBAG/POnBob0S7iYwsbtI2 +3LTTbRnmseIErtJuJmI9yYzgVIm6sUSKhlIUfAIm4rfRuzE94KFeWR2w9RabxOJD +wjYLLKvQ6rFY5g2AV/J0TwDjYuq0absdaDPZ8MKJ+/lpGYK3Te+CTOfq5FJRFt1q +GOkXAxnNpGg0obeRWRKFiAMHbcw6a8LIMfRjCooo3+uSQGsbVzGxSB4CYo720KcC +9vB1K9XALwzoqCewP4aiQsMY1GWpAmzXJftY3w+lka0e9dBYcdEdOqxSoZb5OBBZ +p5e60QweRuJsb60aUaCG8HoICevXYK2fFqCQdlb5sIqQqXyN2K6HuKAFywsjsGyJ +abY= +-----END CERTIFICATE-----"; + + // Configure the client + let ca = "-----BEGIN CERTIFICATE----- +MIIDSzCCAjOgAwIBAgIIB42n1ZIkOakwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMwN1oYDzIxMjMw +MzA2MTYwMzA3WjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSAwNzhkYTcwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIuCq24O4P4Aep5vAVlrIQ7P8+ +uWWgcHIFYa02TmhBUB/hjo0JANCQvAtpVNuQ8NyKPlqnnq1cttePbSYVeA0rrnOs +DcfySAiyGBEY9zMjFfHJtH1wtrPcJEU8XIEY3xUlrAJE2CEuV9dVYgfEEydnvgLc +8Ug0WXSiARjqbnMW3l8jh6bYCp/UpL/gSM4mxdKrgpfyPoweGhlOWXc3RTS7cqM9 
+T25acURGOSI6/g8GF0sNE4VZmUvHggSTmsbLeXMJzxDWO+xVehRmbQx3IkG7u++b +QdRwGIJcDNn7zHlDMHtQ0Z1DBV94fZNBwCULhCBB5g20XTGw//S7Fj2FPwyhAgMB +AAGjgYYwgYMwDgYDVR0PAQH/BAQDAgKEMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggr +BgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBTWfAmQ/BUIQm/9 +/llJJs2jUMWzGzAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzANBgkq +hkiG9w0BAQsFAAOCAQEAvtcZFAELKiTuOiAeYts6zeKxc+nnHCzayDeD/BDCbxGJ +e1n+xdHjLtWGd+/Anc+fvftSYBPTFQqCi84lPiUIln5z/rUxE+ke81hNPIfw2obc +yIg87xCabQpVyEh8s+MV+7YPQ1+fH4FuSi2Fck1FejxkVqN2uOZPvOYUmSTsaVr1 +8SfRnwJNZ9UMRPM2bD4Jkvj0VcL42JM3QkOClOzYW4j/vll2cSs4kx7er27cIoo1 +Ck0v2xSPAiVjg6w65rUQeW6uB5m0T2wyj+wm0At8vzhZPlgS1fKhcmT2dzOq3+oN +R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== +-----END CERTIFICATE-----"; + + let mut endpoint: EndPoint = format!("tls/localhost:{}", 14030).parse().unwrap(); + endpoint + .config_mut() + .extend_from_iter( + [ + (TLS_ROOT_CA_CERTIFICATE_RAW, ca), + (TLS_SERVER_PRIVATE_KEY_RAW, key), + (TLS_SERVER_CERTIFICATE_RAW, cert), + ] + .iter() + .copied(), + ) + .unwrap(); + + time_universal_open(&endpoint, WhatAmI::Client).await; +} + +#[cfg(feature = "transport_quic")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_quic_only_open() { + use zenoh_link::quic::config::*; + + // NOTE: this an auto-generated pair of certificate and key. + // The target domain is localhost, so it has no real + // mapping to any existing domain. The certificate and key + // have been generated using: https://github.com/jsha/minica + let key = "-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAsfqAuhElN4HnyeqLovSd4Qe+nNv5AwCjSO+HFiF30x3vQ1Hi +qRA0UmyFlSqBnFH3TUHm4Jcad40QfrX8f11NKGZdpvKHsMYqYjZnYkRFGS2s4fQy +aDbV5M06s3UDX8ETPgY41Y8fCKTSVdi9iHkwcVrXMxUu4IBBx0C1r2GSo3gkIBnU +cELdFdaUOSbdCipJhbnkwixEr2h7PXxwba7SIZgZtRaQWak1VE9b716qe3iMuMha +Efo/UoFmeZCPu5spfwaOZsnCsxRPk2IjbzlsHTJ09lM9wmbEFHBMVAXejLTk++Sr +Xt8jASZhNen/2GzyLQNAquGn98lCMQ6SsE9vLQIDAQABAoIBAGQkKggHm6Q20L+4 +2+bNsoOqguLplpvM4RMpyx11qWE9h6GeUmWD+5yg+SysJQ9aw0ZSHWEjRD4ePji9 +lxvm2IIxzuIftp+NcM2gBN2ywhpfq9XbO/2NVR6PJ0dQQJzBG12bzKDFDdYkP0EU +WdiPL+WoEkvo0F57bAd77n6G7SZSgxYekBF+5S6rjbu5I1cEKW+r2vLehD4uFCVX +Q0Tu7TyIOE1KJ2anRb7ZXVUaguNj0/Er7EDT1+wN8KJKvQ1tYGIq/UUBtkP9nkOI +9XJd25k6m5AQPDddzd4W6/5+M7kjyVPi3CsQcpBPss6ueyecZOMaKqdWAHeEyaak +r67TofUCgYEA6GBa+YkRvp0Ept8cd5mh4gCRM8wUuhtzTQnhubCPivy/QqMWScdn +qD0OiARLAsqeoIfkAVgyqebVnxwTrKTvWe0JwpGylEVWQtpGz3oHgjST47yZxIiY +CSAaimi2CYnJZ+QB2oBkFVwNCuXdPEGX6LgnOGva19UKrm6ONsy6V9MCgYEAxBJu +fu4dGXZreARKEHa/7SQjI9ayAFuACFlON/EgSlICzQyG/pumv1FsMEiFrv6w7PRj +4AGqzyzGKXWVDRMrUNVeGPSKJSmlPGNqXfPaXRpVEeB7UQhAs5wyMrWDl8jEW7Ih +XcWhMLn1f/NOAKyrSDSEaEM+Nuu+xTifoAghvP8CgYEAlta9Fw+nihDIjT10cBo0 +38w4dOP7bFcXQCGy+WMnujOYPzw34opiue1wOlB3FIfL8i5jjY/fyzPA5PhHuSCT +Ec9xL3B9+AsOFHU108XFi/pvKTwqoE1+SyYgtEmGKKjdKOfzYA9JaCgJe1J8inmV +jwXCx7gTJVjwBwxSmjXIm+sCgYBQF8NhQD1M0G3YCdCDZy7BXRippCL0OGxVfL2R +5oKtOVEBl9NxH/3+evE5y/Yn5Mw7Dx3ZPHUcygpslyZ6v9Da5T3Z7dKcmaVwxJ+H +n3wcugv0EIHvOPLNK8npovINR6rGVj6BAqD0uZHKYYYEioQxK5rGyGkaoDQ+dgHm +qku12wKBgQDem5FvNp5iW7mufkPZMqf3sEGtu612QeqejIPFM1z7VkUgetsgPBXD +tYsqC2FtWzY51VOEKNpnfH7zH5n+bjoI9nAEAW63TK9ZKkr2hRGsDhJdGzmLfQ7v +F6/CuIw9EsAq6qIB8O88FXQqald+BZOx6AzB8Oedsz/WtMmIEmr/+Q== +-----END RSA PRIVATE KEY-----"; + + let cert = "-----BEGIN CERTIFICATE----- +MIIDLjCCAhagAwIBAgIIeUtmIdFQznMwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMxOFoYDzIxMjMw +MzA2MTYwMzE4WjAUMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCx+oC6ESU3gefJ6oui9J3hB76c2/kDAKNI74cWIXfT 
+He9DUeKpEDRSbIWVKoGcUfdNQebglxp3jRB+tfx/XU0oZl2m8oewxipiNmdiREUZ +Lazh9DJoNtXkzTqzdQNfwRM+BjjVjx8IpNJV2L2IeTBxWtczFS7ggEHHQLWvYZKj +eCQgGdRwQt0V1pQ5Jt0KKkmFueTCLESvaHs9fHBtrtIhmBm1FpBZqTVUT1vvXqp7 +eIy4yFoR+j9SgWZ5kI+7myl/Bo5mycKzFE+TYiNvOWwdMnT2Uz3CZsQUcExUBd6M +tOT75Kte3yMBJmE16f/YbPItA0Cq4af3yUIxDpKwT28tAgMBAAGjdjB0MA4GA1Ud +DwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0T +AQH/BAIwADAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzAUBgNVHREE +DTALgglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEBAG/POnBob0S7iYwsbtI2 +3LTTbRnmseIErtJuJmI9yYzgVIm6sUSKhlIUfAIm4rfRuzE94KFeWR2w9RabxOJD +wjYLLKvQ6rFY5g2AV/J0TwDjYuq0absdaDPZ8MKJ+/lpGYK3Te+CTOfq5FJRFt1q +GOkXAxnNpGg0obeRWRKFiAMHbcw6a8LIMfRjCooo3+uSQGsbVzGxSB4CYo720KcC +9vB1K9XALwzoqCewP4aiQsMY1GWpAmzXJftY3w+lka0e9dBYcdEdOqxSoZb5OBBZ +p5e60QweRuJsb60aUaCG8HoICevXYK2fFqCQdlb5sIqQqXyN2K6HuKAFywsjsGyJ +abY= +-----END CERTIFICATE-----"; + + // Configure the client + let ca = "-----BEGIN CERTIFICATE----- +MIIDSzCCAjOgAwIBAgIIB42n1ZIkOakwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMwN1oYDzIxMjMw +MzA2MTYwMzA3WjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSAwNzhkYTcwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIuCq24O4P4Aep5vAVlrIQ7P8+ +uWWgcHIFYa02TmhBUB/hjo0JANCQvAtpVNuQ8NyKPlqnnq1cttePbSYVeA0rrnOs +DcfySAiyGBEY9zMjFfHJtH1wtrPcJEU8XIEY3xUlrAJE2CEuV9dVYgfEEydnvgLc +8Ug0WXSiARjqbnMW3l8jh6bYCp/UpL/gSM4mxdKrgpfyPoweGhlOWXc3RTS7cqM9 +T25acURGOSI6/g8GF0sNE4VZmUvHggSTmsbLeXMJzxDWO+xVehRmbQx3IkG7u++b +QdRwGIJcDNn7zHlDMHtQ0Z1DBV94fZNBwCULhCBB5g20XTGw//S7Fj2FPwyhAgMB +AAGjgYYwgYMwDgYDVR0PAQH/BAQDAgKEMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggr +BgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBTWfAmQ/BUIQm/9 +/llJJs2jUMWzGzAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzANBgkq +hkiG9w0BAQsFAAOCAQEAvtcZFAELKiTuOiAeYts6zeKxc+nnHCzayDeD/BDCbxGJ +e1n+xdHjLtWGd+/Anc+fvftSYBPTFQqCi84lPiUIln5z/rUxE+ke81hNPIfw2obc +yIg87xCabQpVyEh8s+MV+7YPQ1+fH4FuSi2Fck1FejxkVqN2uOZPvOYUmSTsaVr1 +8SfRnwJNZ9UMRPM2bD4Jkvj0VcL42JM3QkOClOzYW4j/vll2cSs4kx7er27cIoo1 +Ck0v2xSPAiVjg6w65rUQeW6uB5m0T2wyj+wm0At8vzhZPlgS1fKhcmT2dzOq3+oN +R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== +-----END CERTIFICATE-----"; + + // Define the locator + let mut endpoint: EndPoint = format!("quic/localhost:{}", 14040).parse().unwrap(); + endpoint + .config_mut() + .extend_from_iter( + [ + (TLS_ROOT_CA_CERTIFICATE_RAW, ca), + (TLS_SERVER_PRIVATE_KEY_RAW, key), + (TLS_SERVER_CERTIFICATE_RAW, cert), + ] + .iter() + .copied(), + ) + .unwrap(); + + time_universal_open(&endpoint, WhatAmI::Client).await; +} + +#[cfg(all(feature = "transport_vsock", target_os = "linux"))] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_vsock_only_open() { + zenoh_util::try_init_log_from_env(); + let endpoint: EndPoint = "vsock/VMADDR_CID_LOCAL:18000".parse().unwrap(); + time_lowlatency_open(&endpoint, WhatAmI::Client).await; +} From fdb86be02184e2d5c4c0661628ffedc5052d9d7b Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Thu, 16 May 2024 14:22:36 +0200 Subject: [PATCH 349/357] Fix clippy warning --- zenoh/src/api/encoding.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs index 29c65f837e..a46b59b6c4 100644 --- a/zenoh/src/api/encoding.rs +++ b/zenoh/src/api/encoding.rs @@ -723,6 +723,7 @@ impl fmt::Display for Encoding { } } +#[allow(dead_code)] // - Encoding trait pub trait EncodingMapping { const ENCODING: Encoding; From 1015a503c5fc9fe05a2a64620c3f8efb244aef2f Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 17 May 
2024 12:19:45 +0200 Subject: [PATCH 350/357] Recompute routes on DeclareFinal to activate writer side filtering --- zenoh/src/net/routing/dispatcher/face.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index c8cf5d8770..6eb9ee5b90 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -335,6 +335,14 @@ impl Primitives for Face { .local_interests .entry(id) .and_modify(|interest| interest.finalized = true); + + // recompute routes + // TODO: disable routes and recompute them in parallel to avoid holding + // tables write lock for a long time. + let mut wtables = zwrite!(self.tables.tables); + let mut root_res = wtables.root_res.clone(); + update_data_routes_from(&mut wtables, &mut root_res); + update_query_routes_from(&mut wtables, &mut root_res); } } } From 25f06bd9e71b4dbed5ba8d75eaa1708693e7b9ec Mon Sep 17 00:00:00 2001 From: Yuyuan Yuan Date: Tue, 21 May 2024 18:03:49 +0800 Subject: [PATCH 351/357] fix(ci): address the issue of `RUSTUP_WINDOWS_PATH_ADD_BIN` change (#1039) --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 07307e160e..2aaf1b0763 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -24,6 +24,7 @@ on: env: CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse CARGO_PROFILE_DEV_DEBUG: false + RUSTUP_WINDOWS_PATH_ADD_BIN: 1 jobs: check: From df2bc5ad89c2492eea8517652a9e37e7656797d1 Mon Sep 17 00:00:00 2001 From: Dmitrii Bannov <104833606+yellowhatter@users.noreply.github.com> Date: Wed, 22 May 2024 11:48:20 +0300 Subject: [PATCH 352/357] Fixed stabby ( https://github.com/rust-lang/rust/issues/123281 ) (#1036) * Fixed stabby ( https://github.com/rust-lang/rust/issues/123281 ) * temp: use fixed stabby from branch --------- Co-authored-by: OlivierHecart --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 2ec98a4761..a25aff85d7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -149,7 +149,7 @@ serde_cbor = "0.11.2" serde_json = "1.0.114" serde-pickle = "1.1.1" serde_yaml = "0.9.19" -stabby = "4.0.5" +stabby = { git="https://github.com/ZettaScaleLabs/stabby.git", branch="fix_stabby_abi_build" } sha3 = "0.10.6" shared_memory = "0.12.4" shellexpand = "3.0.0" From 127c29fe2bdea1465cb72eb8637fde3f710dfce2 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Wed, 22 May 2024 14:49:27 +0200 Subject: [PATCH 353/357] fix: add missing builders to public API --- zenoh/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index 26d87e57e3..4f9d5bd71d 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -279,8 +279,8 @@ pub mod publication { pub use crate::api::publication::PublisherRef; pub use crate::api::{ builders::publication::{ - PublicationBuilderDelete, PublicationBuilderPut, PublisherBuilder, - PublisherDeleteBuilder, + PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, PublisherBuilder, + PublisherDeleteBuilder, PublisherPutBuilder, }, publication::{Priority, Publisher, PublisherUndeclaration}, }; From b3848704ad418cad5961de08cd0626e975dfe813 Mon Sep 17 00:00:00 2001 From: Darius Maitia Date: Thu, 23 May 2024 09:50:20 +0200 Subject: [PATCH 354/357] issue(encoding): exposing api::encoding internal values through `mod internals` to allow bindings to access them. 
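As a rough illustration of what this unlocks for bindings, the sketch below reads the numeric id and optional schema of an `Encoding` through the new accessors. Only `EncodingInternals::id()`, `EncodingInternals::schema()` and the `zenoh::internal` re-export come from this patch; the `Encoding` import path, the `describe` helper and its formatting are assumptions made for the example.

    use zenoh::internal::EncodingInternals;
    // Path to `Encoding` is assumed; adjust to wherever the public type is re-exported.
    use zenoh::encoding::Encoding;

    // Hypothetical helper a binding could use to flatten an Encoding into its wire-level parts.
    fn describe(e: &Encoding) -> String {
        match e.schema() {
            // `schema()` yields the optional schema bytes as a `ZSlice`.
            Some(s) => format!("encoding id={} with {}-byte schema", e.id(), s.len()),
            // `id()` is the numeric prefix identifying the base encoding.
            None => format!("encoding id={} without schema", e.id()),
        }
    }

Routing these accessors through a trait re-exported under `internal`, instead of inherent public methods, keeps the underlying fields out of the user-facing `Encoding` API while still giving bindings the raw id/schema pair.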
From b3848704ad418cad5961de08cd0626e975dfe813 Mon Sep 17 00:00:00 2001
From: Darius Maitia
Date: Thu, 23 May 2024 09:50:20 +0200
Subject: [PATCH 354/357] issue(encoding): exposing api::encoding internal values through `mod internals` to allow bindings to access them.

---
 zenoh/src/api/encoding.rs | 16 ++++++++++++++++
 zenoh/src/lib.rs          |  2 ++
 2 files changed, 18 insertions(+)

diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs
index a46b59b6c4..7a76b98aca 100644
--- a/zenoh/src/api/encoding.rs
+++ b/zenoh/src/api/encoding.rs
@@ -836,6 +836,22 @@ impl EncodingMapping for serde_pickle::Value {
     const ENCODING: Encoding = Encoding::APPLICATION_PYTHON_SERIALIZED_OBJECT;
 }
 
+pub trait EncodingInternals {
+    fn id(&self) -> u16;
+
+    fn schema(&self) -> Option<&ZSlice>;
+}
+
+impl EncodingInternals for Encoding {
+    fn id(&self) -> u16 {
+        self.0.id
+    }
+
+    fn schema(&self) -> Option<&ZSlice> {
+        self.0.schema.as_ref()
+    }
+}
+
 // - Zenoh SHM
 #[cfg(feature = "shared-memory")]
 impl EncodingMapping for ZShm {
diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs
index 26d87e57e3..0323938b0d 100644
--- a/zenoh/src/lib.rs
+++ b/zenoh/src/lib.rs
@@ -376,6 +376,8 @@ pub mod internal {
     pub use zenoh_util::{
         core::ResolveFuture, zenoh_home, LibLoader, Timed, TimedEvent, Timer, ZENOH_HOME_ENV_VAR,
     };
+
+    pub use crate::api::encoding::EncodingInternals;
 }
 
 #[cfg(all(feature = "unstable", feature = "shared-memory"))]
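The `EncodingInternals` trait gives bindings access to the wire-level view of an encoding: its numeric id and its optional schema. A minimal usage sketch (illustrative only; the function is generic over the trait so it does not assume a particular re-export path for `Encoding` itself):

```rust
// Hypothetical consumer (not from the zenoh sources) of the EncodingInternals trait
// re-exported as zenoh::internal::EncodingInternals by the patch above.
use zenoh::internal::EncodingInternals;

fn describe<E: EncodingInternals>(encoding: &E) {
    // Numeric identifier carried on the wire.
    println!("encoding id: {}", encoding.id());
    // Optional schema attached to the encoding, if any.
    match encoding.schema() {
        Some(schema) => println!("schema: {} bytes", schema.len()),
        None => println!("no schema"),
    }
}
```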
From 6c494060dd470c6645319817696bc6d363d0b038 Mon Sep 17 00:00:00 2001
From: Dmitrii Bannov <104833606+yellowhatter@users.noreply.github.com>
Date: Thu, 23 May 2024 13:24:16 +0300
Subject: [PATCH 355/357] Fix stabby compilation (#1045)

* Fixed stabby ( https://github.com/rust-lang/rust/issues/123281 )

* temp: use fixed stabby from branch

* fixed stabby package
---
 Cargo.lock | 14 +++++++-------
 Cargo.toml |  2 +-
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 0b16bfde62..f792677886 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3733,9 +3733,9 @@ dependencies = [
 
 [[package]]
 name = "stabby"
-version = "4.0.5"
+version = "5.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ec04c5825384722310b6a1fd83023bee0bfdc838f7aa3069f0a59e10203836b"
+checksum = "c7708f5b0e8bddba162d20fa10c8d17c31a2ec6bba369f7904bb18a8bde49ba2"
 dependencies = [
  "lazy_static",
  "rustversion",
@@ -3744,9 +3744,9 @@ dependencies = [
 
 [[package]]
 name = "stabby-abi"
-version = "4.0.5"
+version = "5.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "976322da1deb6cc64a8406fd24378b840b1962acaac1978a993131c3838d81b3"
+checksum = "1a6e7a8b2ff2c116bfab6afcce0adec14509eb38fd3f231bb97826d01de4021e"
 dependencies = [
  "libc",
  "rustversion",
@@ -3756,9 +3756,9 @@ dependencies = [
 
 [[package]]
 name = "stabby-macros"
-version = "4.0.5"
+version = "5.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "736712a13ab37b1fa6e073831efca751bbcb31033af4d7308bd5d9d605939183"
+checksum = "db97bd3101fab9929a08fa0138d30d46c7a80b9d32bc8a3a00706ba00358a275"
 dependencies = [
  "proc-macro-crate",
  "proc-macro2",
@@ -4436,7 +4436,7 @@
 version = "1.6.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675"
 dependencies = [
- "cfg-if 0.1.10",
+ "cfg-if 1.0.0",
  "static_assertions",
 ]

diff --git a/Cargo.toml b/Cargo.toml
index a25aff85d7..935eacb328 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -149,7 +149,7 @@ serde_cbor = "0.11.2"
 serde_json = "1.0.114"
 serde-pickle = "1.1.1"
 serde_yaml = "0.9.19"
-stabby = { git="https://github.com/ZettaScaleLabs/stabby.git", branch="fix_stabby_abi_build" }
+stabby = "5.0.1"
 sha3 = "0.10.6"
 shared_memory = "0.12.4"
 shellexpand = "3.0.0"

From 959fb6e6e2ecdf8a28874e70a8f7e8602254e52b Mon Sep 17 00:00:00 2001
From: OlivierHecart
Date: Thu, 23 May 2024 14:48:45 +0200
Subject: [PATCH 356/357] Fix Interest Declare replies behavior

---
 zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs  | 2 +-
 zenoh/src/net/routing/hat/linkstate_peer/queries.rs | 2 +-
 zenoh/src/net/routing/hat/p2p_peer/pubsub.rs        | 2 +-
 zenoh/src/net/routing/hat/p2p_peer/queries.rs       | 2 +-
 zenoh/src/net/routing/hat/router/pubsub.rs          | 2 +-
 zenoh/src/net/routing/hat/router/queries.rs         | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs
index 135f899656..67b04661c6 100644
--- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs
+++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs
@@ -622,7 +622,7 @@ impl HatPubSubTrait for HatCode {
         aggregate: bool,
     ) {
         if mode.current() && face.whatami == WhatAmI::Client {
-            let interest_id = mode.future().then_some(id);
+            let interest_id = (!mode.future()).then_some(id);
             let sub_info = SubscriberInfo {
                 reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers
             };
diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs
index 3d9babbd5d..9c3d502e5f 100644
--- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs
+++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs
@@ -695,7 +695,7 @@ impl HatQueriesTrait for HatCode {
         aggregate: bool,
     ) {
         if mode.current() && face.whatami == WhatAmI::Client {
-            let interest_id = mode.future().then_some(id);
+            let interest_id = (!mode.future()).then_some(id);
             if let Some(res) = res.as_ref() {
                 if aggregate {
                     if hat!(tables).peer_qabls.iter().any(|qabl| {
diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs
index 69cb1619b7..e46ff3ff16 100644
--- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs
+++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs
@@ -375,7 +375,7 @@ impl HatPubSubTrait for HatCode {
         aggregate: bool,
     ) {
         if mode.current() && face.whatami == WhatAmI::Client {
-            let interest_id = mode.future().then_some(id);
+            let interest_id = (!mode.future()).then_some(id);
             let sub_info = SubscriberInfo {
                 reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers
             };
diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs
index e986cfa16e..caa5f79694 100644
--- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs
+++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs
@@ -353,7 +353,7 @@ impl HatQueriesTrait for HatCode {
         aggregate: bool,
     ) {
         if mode.current() && face.whatami == WhatAmI::Client {
-            let interest_id = mode.future().then_some(id);
+            let interest_id = (!mode.future()).then_some(id);
             if let Some(res) = res.as_ref() {
                 if aggregate {
                     if tables.faces.values().any(|src_face| {
diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs
index 3bfb0fdd6f..2af567d989 100644
--- a/zenoh/src/net/routing/hat/router/pubsub.rs
+++ b/zenoh/src/net/routing/hat/router/pubsub.rs
@@ -922,7 +922,7 @@ impl HatPubSubTrait for HatCode {
         aggregate: bool,
     ) {
         if mode.current() && face.whatami == WhatAmI::Client {
-            let interest_id = mode.future().then_some(id);
+            let interest_id = (!mode.future()).then_some(id);
             let sub_info = SubscriberInfo {
                 reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers
             };
diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs
index 72e3a781e5..9a2beeb001 100644
--- a/zenoh/src/net/routing/hat/router/queries.rs
+++ b/zenoh/src/net/routing/hat/router/queries.rs
@@ -1078,7 +1078,7 @@ impl HatQueriesTrait for HatCode {
         aggregate: bool,
     ) {
         if mode.current() && face.whatami == WhatAmI::Client {
-            let interest_id = mode.future().then_some(id);
+            let interest_id = (!mode.future()).then_some(id);
             if let Some(res) = res.as_ref() {
                 if aggregate {
                     if hat!(tables).router_qabls.iter().any(|qabl| {
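The one-line change repeated across the six files above inverts the condition guarding `interest_id`: declarations emitted while answering an interest now carry the interest id only when the interest does not also ask for future declarations. A self-contained sketch of that boolean logic, using a simplified stand-in for zenoh's interest mode (illustrative only, not the actual zenoh types):

```rust
// Simplified stand-in for zenoh's interest mode, just to show the effect of the fix.
#[derive(Clone, Copy)]
enum InterestMode {
    Current,
    Future,
    CurrentFuture,
}

impl InterestMode {
    fn future(self) -> bool {
        matches!(self, InterestMode::Future | InterestMode::CurrentFuture)
    }
}

fn interest_id_for_reply(mode: InterestMode, id: u32) -> Option<u32> {
    // Before the fix this was `mode.future().then_some(id)`, i.e. the id was attached
    // exactly in the cases where it is now omitted.
    (!mode.future()).then_some(id)
}

fn main() {
    assert_eq!(interest_id_for_reply(InterestMode::Current, 7), Some(7));
    assert_eq!(interest_id_for_reply(InterestMode::CurrentFuture, 7), None);
    assert_eq!(interest_id_for_reply(InterestMode::Future, 7), None);
}
```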
From b06c58cd931c24a504a010e8d5cd63c7bacca90e Mon Sep 17 00:00:00 2001
From: Darius Maitia
Date: Fri, 24 May 2024 12:45:50 +0200
Subject: [PATCH 357/357] issue(encoding): adding `new` function to EncodingInternals

---
 zenoh/src/api/encoding.rs | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs
index 7a76b98aca..f63c339ba3 100644
--- a/zenoh/src/api/encoding.rs
+++ b/zenoh/src/api/encoding.rs
@@ -840,6 +840,8 @@ pub trait EncodingInternals {
     fn id(&self) -> u16;
 
     fn schema(&self) -> Option<&ZSlice>;
+
+    fn new(id: u16, schema: Option<ZSlice>) -> Self;
 }
 
 impl EncodingInternals for Encoding {
@@ -850,6 +852,10 @@ impl EncodingInternals for Encoding {
     fn schema(&self) -> Option<&ZSlice> {
         self.0.schema.as_ref()
     }
+
+    fn new(id: u16, schema: Option<ZSlice>) -> Self {
+        Encoding(zenoh_protocol::core::Encoding { id, schema })
+    }
 }
 
 // - Zenoh SHM
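With the constructor in place, a binding can assemble an encoding from raw parts instead of going through the predefined constants. A minimal, hypothetical usage sketch (the id and schema values are arbitrary examples, and the `ZSlice` import path is assumed to be the `zenoh_buffers` crate):

```rust
// Hypothetical example (not from the zenoh sources): building an encoding from its
// raw id and an optional schema via the newly added EncodingInternals::new.
use zenoh::internal::EncodingInternals;
use zenoh_buffers::ZSlice;

fn custom_encoding<E: EncodingInternals>() -> E {
    // id 1 and the "utf-8" schema bytes are placeholders, not meaningful values.
    E::new(1, Some(ZSlice::from(b"utf-8".to_vec())))
}
```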